diff --git a/.ddqa/config.toml b/.ddqa/config.toml
index 02913ac890130..e24656da4b464 100644
--- a/.ddqa/config.toml
+++ b/.ddqa/config.toml
@@ -78,6 +78,7 @@ jira_issue_type = "Task"
jira_statuses = ["To Do", "In Progress", "Done"]
github_team = "opentelemetry"
github_labels = ["team/opentelemetry"]
+exclude_members = ["ancostas", "Maascamp"]

[teams."eBPF Platform"]
jira_project = "EBPF"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 15af633890318..fbdd00e85974f 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -98,7 +98,7 @@
/.gitlab/common/test_infra_version.yml @DataDog/agent-devx-loops @DataDog/agent-devx-infra
/.gitlab/e2e/e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops
-/.gitlab/e2e/k8s_e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops
+/.gitlab/e2e_k8s/e2e_k8s.yml @DataDog/container-integrations @DataDog/agent-devx-loops
/.gitlab/e2e/install_packages @DataDog/agent-delivery
/.gitlab/container_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops
/.gitlab/binary_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops
@@ -205,6 +205,7 @@
/cmd/system-probe/modules/service_discover* @DataDog/apm-onboarding @DataDog/universal-service-monitoring
/cmd/system-probe/modules/language_detection* @DataDog/processes @DataDog/universal-service-monitoring
/cmd/system-probe/runtime/ @DataDog/agent-security
+/cmd/system-probe/modules/dynamic_instrumentation* @DataDog/debugger
/cmd/system-probe/windows/ @DataDog/windows-kernel-integrations
/cmd/system-probe/windows_resources/ @DataDog/windows-kernel-integrations
/cmd/system-probe/main_windows*.go @DataDog/windows-kernel-integrations
@@ -425,6 +426,7 @@
/pkg/util/orchestrator/ @DataDog/container-app
/pkg/util/podman/ @DataDog/container-integrations
/pkg/util/prometheus @DataDog/container-integrations
+/pkg/util/tagger @DataDog/container-platform
/pkg/util/trivy/ @DataDog/container-integrations @DataDog/agent-security
/pkg/util/uuid/ @DataDog/agent-shared-components
/pkg/util/cgroups/ @DataDog/container-integrations
diff --git a/.github/workflows/add_milestone.yml b/.github/workflows/add_milestone.yml
index ef43c0869e896..cc647378a5460 100644
--- a/.github/workflows/add_milestone.yml
+++ b/.github/workflows/add_milestone.yml
@@ -8,11 +8,15 @@ on:
- main
- "[0-9]+.[0-9]+.x"
+permissions: {}
+
jobs:
add-milestone-pr:
name: Add Milestone on PR
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}
diff --git a/.github/workflows/backport-pr.yml b/.github/workflows/backport-pr.yml
index 188cd54aadb8d..3ee7eeeb276b4 100644
--- a/.github/workflows/backport-pr.yml
+++ b/.github/workflows/backport-pr.yml
@@ -5,6 +5,8 @@ on:
- closed
- labeled

+permissions: {}
+
jobs:
backport:
name: Backport PR
@@ -18,6 +20,9 @@ jobs:
&& contains(github.event.label.name, 'backport')
)
)
+ permissions:
+ contents: write
+ pull-requests: write
steps:
- uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3
id: app-token
diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml
index 9a04aceed38e4..523018890c0f6 100644
--- a/.github/workflows/buildimages-update.yml
+++ b/.github/workflows/buildimages-update.yml
@@ -24,6 +24,8 @@ on:
required: true
type: boolean

+permissions: {}
+
jobs:
open-go-update-pr:
runs-on: ubuntu-latest
diff --git a/.github/workflows/chase_release_managers.yml b/.github/workflows/chase_release_managers.yml
index 652746f1e93a9..bcf922f93d575 100644
--- a/.github/workflows/chase_release_managers.yml
+++ b/.github/workflows/chase_release_managers.yml
@@ -8,6 +8,7 @@ on:
required: true
type: string

+permissions: {}
jobs:
create_release_schedule:
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 4613f73359f16..748cd3e5aaeba 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -10,9 +10,13 @@ on:
- main
- "[0-9]+.[0-9]+.x"
+permissions: {}
+
jobs:
CodeQL-Build:
runs-on: ubuntu-20.04
+ permissions:
+ security-events: write
strategy:
matrix:
language: ["go", "javascript", "python", "cpp"]
diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml
index 0d190cb7bb606..bfed4df0f66b4 100644
--- a/.github/workflows/create_rc_pr.yml
+++ b/.github/workflows/create_rc_pr.yml
@@ -9,6 +9,8 @@ on:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+permissions: {}
+
jobs:
find_release_branches:
runs-on: ubuntu-latest
@@ -48,6 +50,9 @@ jobs:
create_rc_pr:
runs-on: ubuntu-latest
needs: find_release_branches
+ permissions:
+ contents: write
+ pull-requests: write
strategy:
matrix:
value: ${{fromJSON(needs.find_release_branches.outputs.branches)}}
diff --git a/.github/workflows/create_release_schedule.yml b/.github/workflows/create_release_schedule.yml
index e19372fa8f1d7..4fc749d9b280c 100644
--- a/.github/workflows/create_release_schedule.yml
+++ b/.github/workflows/create_release_schedule.yml
@@ -12,6 +12,8 @@ on:
required: true
type: string

+permissions: {}
+
jobs:
create_release_schedule:
diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml
index 970a0fef308f8..2e3152fb10763 100644
--- a/.github/workflows/cws-btfhub-sync.yml
+++ b/.github/workflows/cws-btfhub-sync.yml
@@ -16,6 +16,8 @@ on:
schedule:
- cron: '30 4 * * 5' # at 4:30 UTC on Friday

+permissions: {}
+
jobs:
generate:
runs-on: ubuntu-latest
@@ -91,6 +93,9 @@ jobs:
combine:
needs: generate
runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
steps:
- name: Checkout datadog-agent repository
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
diff --git a/.github/workflows/datadog-static-analysis.yml b/.github/workflows/datadog-static-analysis.yml
index c738e875c2991..e7030c87f71a9 100644
--- a/.github/workflows/datadog-static-analysis.yml
+++ b/.github/workflows/datadog-static-analysis.yml
@@ -2,6 +2,8 @@ on: [push]
name: Datadog Static Analysis

+permissions: {}
+
jobs:
static-analysis:
if: github.triggering_actor != 'dependabot[bot]'
diff --git a/.github/workflows/do-not-merge.yml b/.github/workflows/do-not-merge.yml
index 13886c696f679..a21f9e03d1fb2 100644
--- a/.github/workflows/do-not-merge.yml
+++ b/.github/workflows/do-not-merge.yml
@@ -10,6 +10,8 @@ on:
branches:
- mq-working-branch-*

+permissions: {}
+
jobs:
do-not-merge:
if: ${{ contains(github.event.*.labels.*.name, 'do-not-merge/hold') || contains(github.event.*.labels.*.name, 'do-not-merge/WIP') }}
diff --git a/.github/workflows/docs-dev.yml b/.github/workflows/docs-dev.yml
index 7dba335ed58b7..4ce377865f81b 100644
--- a/.github/workflows/docs-dev.yml
+++ b/.github/workflows/docs-dev.yml
@@ -14,6 +14,8 @@ on:
- docs/**
- .github/workflows/docs-dev.yml

+permissions: {}
+
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' && true || false }}
@@ -54,6 +56,8 @@ jobs:

publish:
runs-on: ubuntu-latest
+ permissions:
+ contents: write
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
needs:
diff --git a/.github/workflows/external-contributor.yml b/.github/workflows/external-contributor.yml
index 3cc35d4cc12e2..d4092f41492ae 100644
--- a/.github/workflows/external-contributor.yml
+++ b/.github/workflows/external-contributor.yml
@@ -6,10 +6,14 @@ on:
pull_request_target:
types: [opened, reopened]

+permissions: {}
+
jobs:
external-contributor-prs:
name: Handle Fork PRs
runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
if: github.event.pull_request.head.repo.full_name != github.repository
steps:
- name: Checkout repository
diff --git a/.github/workflows/go-update-commenter.yml b/.github/workflows/go-update-commenter.yml
index 9925fba1614ca..ff1d104c691ec 100644
--- a/.github/workflows/go-update-commenter.yml
+++ b/.github/workflows/go-update-commenter.yml
@@ -5,6 +5,8 @@ on:
# Only run on PR label events (in particular not on every commit)
types: [ labeled ]

+permissions: {}
+
jobs:
old-versions-match:
# Only run if the PR is labeled with 'go-update'
diff --git a/.github/workflows/gohai.yml b/.github/workflows/gohai.yml
index bb20f0e0104df..a328f67c5b853 100644
--- a/.github/workflows/gohai.yml
+++ b/.github/workflows/gohai.yml
@@ -12,6 +12,8 @@ on:
paths:
- "pkg/gohai/**"
+permissions: {}
+
jobs:
gohai_test:
strategy:
diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml
index 08980653d1d83..1f0601757941f 100644
--- a/.github/workflows/label-analysis.yml
+++ b/.github/workflows/label-analysis.yml
@@ -13,10 +13,14 @@ env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}

+permissions: {}
+
jobs:
assign-team-label:
if: github.triggering_actor != 'dd-devflow[bot]'
runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index 5cade58e6495c..b92075b895975 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -6,6 +6,8 @@ on:
- main
- "[0-9]+.[0-9]+.x"
+permissions: {}
+
jobs:
label:
permissions:
diff --git a/.github/workflows/markdown-lint-check.yml b/.github/workflows/markdown-lint-check.yml
index 94386e05e6621..6ade9a5ec8560 100644
--- a/.github/workflows/markdown-lint-check.yml
+++ b/.github/workflows/markdown-lint-check.yml
@@ -3,6 +3,8 @@ name: Check Markdown links
on:
pull_request:

+permissions: {}
+
jobs:
markdown-link-check:
runs-on: ubuntu-latest
diff --git a/.github/workflows/serverless-benchmarks.yml b/.github/workflows/serverless-benchmarks.yml
index 37742948b0d36..40ac0953e42a4 100644
--- a/.github/workflows/serverless-benchmarks.yml
+++ b/.github/workflows/serverless-benchmarks.yml
@@ -14,6 +14,8 @@ concurrency:
group: ${{ github.workflow }}/PR#${{ github.event.pull_request.number }}
cancel-in-progress: true

+permissions: {}
+
jobs:
baseline:
name: Baseline
diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml
index 56818495bd7cb..c8880d3306d01 100644
--- a/.github/workflows/serverless-binary-size.yml
+++ b/.github/workflows/serverless-binary-size.yml
@@ -6,6 +6,8 @@ on:
env:
SIZE_ALLOWANCE: fromJSON(1000000) # 1 MB

+permissions: {}
+
jobs:
comment:
runs-on: ubuntu-latest
@@ -138,6 +140,18 @@ jobs:
name: dependency-graphs
path: go/src/github.com/DataDog/datadog-lambda-extension/graphs

+ - name: Write message
+ id: write
+ if: steps.should.outputs.should_run == 'true'
+ env:
+ VAR_COLD_START: ${{ steps.compare.outputs.coldstart }}
+ VAR_DIFF: ${{ steps.compare.outputs.diff }}
+ VAR_DEPS: ${{ steps.deps.outputs.deps }}
+ VAR_RUN_ID: ${{ github.run_id }}
+ run: |
+ cd go/src/github.com/DataDog/datadog-agent
+ ./test/integration/serverless_perf/write_message.sh
+
- name: Post comment
uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2.9.0
if: steps.should.outputs.should_run == 'true'
@@ -145,21 +159,4 @@ jobs:
header: serverless-binary-size
hide_and_recreate: true
hide_classify: "RESOLVED"
- message: |
- :warning::rotating_light: Warning, this pull request increases the binary size of serverless extension by ${{ steps.compare.outputs.diff }} bytes. Each MB of binary size increase means about 10ms of additional cold start time, so this pull request would increase cold start time by ${{ steps.compare.outputs.coldstart }}ms.
-
- If you have questions, we are happy to help, come visit us in the [#serverless](https://dd.slack.com/archives/CBWDFKWV8) slack channel and provide a link to this comment.
-
-
- Debug info
-
- These dependencies were added to the serverless extension by this pull request:
-
- ```
- ${{ steps.deps.outputs.deps }}
- ```
-
- View dependency graphs for each added dependency in the [artifacts section](https://github.com/DataDog/datadog-agent/actions/runs/${{ github.run_id }}#artifacts) of the github action.
-
- We suggest you consider adding the `!serverless` build tag to remove any new dependencies not needed in the serverless extension.
-
+ path: ${{ steps.write.outputs.filename }}
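Note on the hunk above: the inline `message:` body is replaced by a `path:` input pointing at a file produced by the new "Write message" step. The script itself is not part of this diff; a minimal sketch of what `test/integration/serverless_perf/write_message.sh` could look like, consistent with the `VAR_*` environment variables the step passes in and the `filename` output the `path:` input consumes (everything beyond those names is an assumption):

```bash
#!/bin/bash
# Hypothetical sketch of write_message.sh -- the real script is not shown in
# this diff. It renders the former inline comment body from the VAR_*
# variables and publishes the file path as the step output `filename`.
set -euo pipefail

outfile="$(mktemp)"
cat > "$outfile" <<EOF
:warning: This pull request increases the serverless extension binary size by ${VAR_DIFF} bytes,
adding roughly ${VAR_COLD_START}ms of cold start time.

Dependencies added by this pull request:

${VAR_DEPS}

View dependency graphs in the artifacts section of
https://github.com/DataDog/datadog-agent/actions/runs/${VAR_RUN_ID}
EOF
echo "filename=${outfile}" >> "$GITHUB_OUTPUT"
```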
diff --git a/.github/workflows/serverless-integration.yml b/.github/workflows/serverless-integration.yml
index 8bd8459b6c52f..c2866e77f69b4 100644
--- a/.github/workflows/serverless-integration.yml
+++ b/.github/workflows/serverless-integration.yml
@@ -12,6 +12,8 @@ on:
schedule:
- cron: '0 14 * * *' # cron schedule uses UTC timezone. Run tests at the beginning of the day in US-East

+permissions: {}
+
jobs:
test:
runs-on: ubuntu-latest
diff --git a/.github/workflows/slapr.yml b/.github/workflows/slapr.yml
index 48be5e393fd38..e88d67945b5a2 100644
--- a/.github/workflows/slapr.yml
+++ b/.github/workflows/slapr.yml
@@ -7,6 +7,8 @@
# pull_request:
# types: [closed]
#
+# permissions: {}
+#
# jobs:
# run_slapr_datadog_agent:
# runs-on: ubuntu-latest
diff --git a/.gitignore b/.gitignore
index ab4936e5d46b1..83c6d28794cff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -112,16 +112,6 @@ pkg/process/config/logs
*.ninja
compile_commands.json
pkg/ebpf/bytecode/build/**/*.d
-pkg/ebpf/bytecode/runtime/conntrack.go
-pkg/ebpf/bytecode/runtime/http.go
-pkg/ebpf/bytecode/runtime/usm.go
-pkg/ebpf/bytecode/runtime/shared-libraries.go
-pkg/ebpf/bytecode/runtime/offsetguess-test.go
-pkg/ebpf/bytecode/runtime/oom-kill.go
-pkg/ebpf/bytecode/runtime/runtime-security.go
-pkg/ebpf/bytecode/runtime/tcp-queue-length.go
-pkg/ebpf/bytecode/runtime/tracer.go
-pkg/ebpf/bytecode/runtime/logdebug-test.go
pkg/security/tests/syscall_tester/**/*.d
# dsd artifacts
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c50880e298007..a813d836841a8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -49,6 +49,7 @@ include:
default:
retry:
max: 2
+ exit_codes: 42
when:
- runner_system_failure
- stuck_or_timeout_failure
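The new `exit_codes: 42` entry extends the default retry policy: in addition to the `runner_system_failure` and `stuck_or_timeout_failure` reasons, a job whose script exits with code 42 is also retried (up to `max: 2`). A job script can then flag a transient failure by exiting with that code — an illustrative sketch, with a hypothetical task name:

```bash
#!/bin/bash
# Illustrative only: exit with the code listed under retry:exit_codes (42)
# to make GitLab retry the job; any other non-zero exit code is treated as
# a permanent failure and is not retried by this rule.
if ! inv -e some.flaky.task; then
  exit 42  # transient failure: GitLab retries the job (max: 2)
fi
```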
@@ -196,59 +197,60 @@ variables:
# List of parameters retrieved from AWS SSM
# They must be defined as environment variables in the GitLab CI/CD settings, to ease rotation if needed
- AGENT_QA_PROFILE_SSM_NAME: ci.datadog-agent.agent-qa-profile # agent-devx-infra
- API_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra
- API_KEY_DDDEV_SSM_NAME: ci.datadog-agent.datadog_api_key # agent-devx-infra
- APP_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra
- CHANGELOG_COMMIT_SHA_SSM_NAME: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra
- CHOCOLATEY_API_KEY_SSM_NAME: ci.datadog-agent.chocolatey_api_key # windows-agent
- CODECOV_TOKEN_SSM_NAME: ci.datadog-agent.codecov_token # agent-devx-infra
- DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery
- DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery
- DOCKER_REGISTRY_LOGIN_SSM_KEY: ci.datadog-agent.docker_hub_login # container-integrations
- DOCKER_REGISTRY_PWD_SSM_KEY: ci.datadog-agent.docker_hub_pwd # container-integrations
- E2E_TESTS_API_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops
- E2E_TESTS_APP_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops
- E2E_TESTS_RC_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops
+ AGENT_QA_PROFILE: ci.datadog-agent.agent-qa-profile # agent-devx-infra
+ API_KEY_ORG2: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra
+ API_KEY_DDDEV: ci.datadog-agent.datadog_api_key # agent-devx-infra
+ APP_KEY_ORG2: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra
+ CHANGELOG_COMMIT_SHA: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra
+ CHOCOLATEY_API_KEY: ci.datadog-agent.chocolatey_api_key # windows-agent
+ CODECOV_TOKEN: ci.datadog-agent.codecov_token # agent-devx-infra
+ DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery
+ DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery
+ DOCKER_REGISTRY_LOGIN: ci.datadog-agent.docker_hub_login # container-integrations
+ DOCKER_REGISTRY_PWD: ci.datadog-agent.docker_hub_pwd # container-integrations
+ E2E_TESTS_API_KEY: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops
+ E2E_TESTS_APP_KEY: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops
+ E2E_TESTS_RC_KEY: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops
E2E_TESTS_AZURE_CLIENT_ID: ci.datadog-agent.e2e_tests_azure_client_id # agent-devx-loops
E2E_TESTS_AZURE_CLIENT_SECRET: ci.datadog-agent.e2e_tests_azure_client_secret # agent-devx-loops
E2E_TESTS_AZURE_TENANT_ID: ci.datadog-agent.e2e_tests_azure_tenant_id # agent-devx-loops
E2E_TESTS_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.e2e_tests_azure_subscription_id # agent-devx-loops
- KITCHEN_EC2_SSH_KEY_SSM_NAME: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops
- KITCHEN_AZURE_CLIENT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops
- KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops
- KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops
- KITCHEN_AZURE_TENANT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops
- GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME: pr-commenter.github_app_key # agent-devx-infra
- GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME: pr-commenter.github_integration_id # agent-devx-infra
- GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME: pr-commenter.github_installation_id # agent-devx-infra
- GITLAB_SCHEDULER_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_pipelines_scheduler_token # ci-cd
- GITLAB_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_read_api_token # ci-cd
- GITLAB_FULL_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_full_api_token # ci-cd
- INSTALL_SCRIPT_API_KEY_SSM_NAME: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery
- JIRA_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.jira_read_api_token # agent-devx-infra
- AGENT_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.platform-github-app-id # agent-devx-infra
- AGENT_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra
- AGENT_GITHUB_KEY_SSM_NAME: ci.datadog-agent.platform-github-app-key # agent-devx-infra
- MACOS_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.macos_github_app_id # agent-devx-infra
- MACOS_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.macos_github_installation_id # agent-devx-infra
- MACOS_GITHUB_KEY_SSM_NAME: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra
- MACOS_GITHUB_APP_ID_2_SSM_NAME: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra
- MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra
- MACOS_GITHUB_KEY_2_SSM_NAME: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra
- RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery
- RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery
- SLACK_AGENT_CI_TOKEN_SSM_NAME: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra
- SMP_ACCOUNT_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance
- SMP_AGENT_TEAM_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance
- SMP_API_SSM_NAME: ci.datadog-agent.single-machine-performance-api # single-machine-performance
- SMP_BOT_ACCESS_KEY_SSM_NAME: ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance
- SMP_BOT_ACCESS_KEY_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance
- SSH_KEY_SSM_NAME: ci.datadog-agent.ssh_key # system-probe
- SSH_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_key_rsa # agent-devx-loops
- SSH_PUBLIC_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_public_key_rsa # agent-devx-loops
- VCPKG_BLOB_SAS_URL_SSM_NAME: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent
- WINGET_PAT_SSM_NAME: ci.datadog-agent.winget_pat # windows-agent
+ E2E_TESTS_GCP_CREDENTIALS: ci.datadog-agent.e2e_tests_gcp_credentials # agent-devx-loops
+ KITCHEN_EC2_SSH_KEY: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops
+ KITCHEN_AZURE_CLIENT_ID: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops
+ KITCHEN_AZURE_CLIENT_SECRET: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops
+ KITCHEN_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops
+ KITCHEN_AZURE_TENANT_ID: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops
+ GITHUB_PR_COMMENTER_APP_KEY: pr-commenter.github_app_key # agent-devx-infra
+ GITHUB_PR_COMMENTER_INTEGRATION_ID: pr-commenter.github_integration_id # agent-devx-infra
+ GITHUB_PR_COMMENTER_INSTALLATION_ID: pr-commenter.github_installation_id # agent-devx-infra
+ GITLAB_SCHEDULER_TOKEN: ci.datadog-agent.gitlab_pipelines_scheduler_token # ci-cd
+ GITLAB_READ_API_TOKEN: ci.datadog-agent.gitlab_read_api_token # ci-cd
+ GITLAB_FULL_API_TOKEN: ci.datadog-agent.gitlab_full_api_token # ci-cd
+ INSTALL_SCRIPT_API_KEY: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery
+ JIRA_READ_API_TOKEN: ci.datadog-agent.jira_read_api_token # agent-devx-infra
+ AGENT_GITHUB_APP_ID: ci.datadog-agent.platform-github-app-id # agent-devx-infra
+ AGENT_GITHUB_INSTALLATION_ID: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra
+ AGENT_GITHUB_KEY: ci.datadog-agent.platform-github-app-key # agent-devx-infra
+ MACOS_GITHUB_APP_ID: ci.datadog-agent.macos_github_app_id # agent-devx-infra
+ MACOS_GITHUB_INSTALLATION_ID: ci.datadog-agent.macos_github_installation_id # agent-devx-infra
+ MACOS_GITHUB_KEY: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra
+ MACOS_GITHUB_APP_ID_2: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra
+ MACOS_GITHUB_INSTALLATION_ID_2: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra
+ MACOS_GITHUB_KEY_2: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra
+ RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery
+ RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery
+ SLACK_AGENT_CI_TOKEN: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra
+ SMP_ACCOUNT_ID: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance
+ SMP_AGENT_TEAM_ID: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance
+ SMP_API: ci.datadog-agent.single-machine-performance-api # single-machine-performance
+ SMP_BOT_ACCESS_KEY: ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance
+ SMP_BOT_ACCESS_KEY_ID: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance
+ SSH_KEY: ci.datadog-agent.ssh_key # system-probe
+ SSH_KEY_RSA: ci.datadog-agent.ssh_key_rsa # agent-devx-loops
+ SSH_PUBLIC_KEY_RSA: ci.datadog-agent.ssh_public_key_rsa # agent-devx-loops
+ VCPKG_BLOB_SAS_URL: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent
+ WINGET_PAT: ci.datadog-agent.winget_pat # windows-agent
DD_PKG_VERSION: "latest"
@@ -259,6 +261,7 @@ variables:
RESTORE_CACHE_ATTEMPTS: 2
# Feature flags
FF_SCRIPT_SECTIONS: 1 # Prevent multiline scripts log collapsing, see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3392
+ FF_KUBERNETES_HONOR_ENTRYPOINT: true # Honor the entrypoint in the Docker image when running Kubernetes jobs

#
# Condition mixins for simplification of rules
@@ -698,6 +701,7 @@ workflow:
- .gitlab/functional_test/security_agent.yml
- .gitlab/kernel_matrix_testing/security_agent.yml
- .gitlab/kernel_matrix_testing/common.yml
+ - .gitlab/source_test/ebpf.yml
- test/new-e2e/system-probe/**/*
- test/new-e2e/scenarios/system-probe/**/*
- test/new-e2e/pkg/runner/**/*
@@ -741,6 +745,7 @@ workflow:
- pkg/util/kernel/**/*
- .gitlab/kernel_matrix_testing/system_probe.yml
- .gitlab/kernel_matrix_testing/common.yml
+ - .gitlab/source_test/ebpf.yml
- test/new-e2e/system-probe/**/*
- test/new-e2e/scenarios/system-probe/**/*
- test/new-e2e/pkg/runner/**/*
diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml
index afbdde2dbd51b..488820ac33544 100644
--- a/.gitlab/.pre/cancel-prev-pipelines.yml
+++ b/.gitlab/.pre/cancel-prev-pipelines.yml
@@ -14,6 +14,5 @@ cancel-prev-pipelines:
when: never
- when: on_success
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- inv pipeline.auto-cancel-previous-pipelines
diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml
index 529cc1c8d2956..1c17aa088a4a6 100644
--- a/.gitlab/.pre/test_gitlab_configuration.yml
+++ b/.gitlab/.pre/test_gitlab_configuration.yml
@@ -5,8 +5,7 @@ test_gitlab_configuration:
rules:
- !reference [.on_gitlab_changes]
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN)
- inv -e linter.gitlab-ci
- inv -e linter.job-change-path
- inv -e linter.gitlab-change-paths
@@ -20,7 +19,7 @@ test_gitlab_compare_to:
- !reference [.on_gitlab_changes]
script:
- source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN)
- !reference [.setup_agent_github_app]
- pip install -r tasks/requirements.txt
- inv pipeline.compare-to-itself
diff --git a/.gitlab/binary_build/cluster_agent.yml b/.gitlab/binary_build/cluster_agent.yml
index ea2eac369c8ac..b20ff9d10331e 100644
--- a/.gitlab/binary_build/cluster_agent.yml
+++ b/.gitlab/binary_build/cluster_agent.yml
@@ -23,7 +23,6 @@ cluster_agent-build_amd64:
variables:
ARCH: amd64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]

cluster_agent-build_arm64:
@@ -36,5 +35,4 @@ cluster_agent-build_arm64:
variables:
ARCH: arm64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
diff --git a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
index 501c0d8dd3d38..873a471a3b85a 100644
--- a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
+++ b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
@@ -15,7 +15,6 @@ cluster_agent_cloudfoundry-build_amd64:
ARCH: amd64
KUBERNETES_CPU_REQUEST: 4
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
diff --git a/.gitlab/binary_build/cws_instrumentation.yml b/.gitlab/binary_build/cws_instrumentation.yml
index b6d517df2f52c..787be00f814cb 100644
--- a/.gitlab/binary_build/cws_instrumentation.yml
+++ b/.gitlab/binary_build/cws_instrumentation.yml
@@ -17,7 +17,6 @@ cws_instrumentation-build_amd64:
variables:
ARCH: amd64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]

cws_instrumentation-build_arm64:
@@ -30,5 +29,4 @@ cws_instrumentation-build_arm64:
variables:
ARCH: arm64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
diff --git a/.gitlab/binary_build/linux.yml b/.gitlab/binary_build/linux.yml
index d8644d63a2c9c..56ae035b566e4 100644
--- a/.gitlab/binary_build/linux.yml
+++ b/.gitlab/binary_build/linux.yml
@@ -1,16 +1,12 @@
---
build_dogstatsd_static-binary_x64:
stage: binary_build
- rules:
- - !reference [.except_mergequeue]
- - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
needs: ["lint_linux-x64", "go_deps"]
variables:
ARCH: amd64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
@@ -28,7 +24,6 @@ build_dogstatsd_static-binary_arm64:
variables:
ARCH: arm64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
@@ -44,7 +39,6 @@ build_dogstatsd-binary_x64:
tags: ["arch:amd64"]
needs: ["lint_linux-x64", "go_deps"]
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
@@ -62,7 +56,6 @@ build_dogstatsd-binary_arm64:
variables:
ARCH: arm64
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
@@ -83,7 +76,6 @@ build_iot_agent-binary_x64:
before_script:
- !reference [.retrieve_linux_go_deps]
script:
- - source /root/.bashrc
- inv check-go-version
- inv -e agent.build --flavor iot --major-version 7
- $S3_CP_CMD $CI_PROJECT_DIR/$AGENT_BINARIES_DIR/agent $S3_ARTIFACTS_URI/iot/agent
@@ -100,7 +92,6 @@ build_iot_agent-binary_arm64:
before_script:
- !reference [.retrieve_linux_go_deps]
script:
- - source /root/.bashrc
- inv check-go-version
- inv -e agent.build --flavor iot --major-version 7
diff --git a/.gitlab/binary_build/serverless.yml b/.gitlab/binary_build/serverless.yml
index 8861528211fab..fa626581965be 100644
--- a/.gitlab/binary_build/serverless.yml
+++ b/.gitlab/binary_build/serverless.yml
@@ -5,7 +5,6 @@
- !reference [.except_mergequeue]
- when: on_success
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv check-go-version
diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml
index 86b63e251e362..715387a08f961 100644
--- a/.gitlab/choco_deploy/choco_deploy.yml
+++ b/.gitlab/choco_deploy/choco_deploy.yml
@@ -10,7 +10,11 @@ publish_choco_7_x64:
variables:
ARCH: "x64"
before_script:
- - $chocolateyApiKey=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY_SSM_NAME")
+ - $tmpfile = [System.IO.Path]::GetTempFileName()
+ - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:CHOCOLATEY_API_KEY" "$tmpfile")
+ - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
+ - $chocolateyApiKey=$(cat "$tmpfile")
+ - Remove-Item "$tmpfile"
script:
- '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
- $ErrorActionPreference = "Stop"
diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml
index ed119645aa883..e87bc2d37860e 100644
--- a/.gitlab/common/container_publish_job_templates.yml
+++ b/.gitlab/common/container_publish_job_templates.yml
@@ -13,8 +13,7 @@
IMG_VARIABLES: ""
IMG_SIGNING: ""
script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- |
if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then
export ECR_RELEASE_SUFFIX="-nightly"
diff --git a/.gitlab/common/shared.yml b/.gitlab/common/shared.yml
index 1df106e9b4c08..bb1d7e2198518 100644
--- a/.gitlab/common/shared.yml
+++ b/.gitlab/common/shared.yml
@@ -21,30 +21,30 @@

.setup_deb_signing_key: &setup_deb_signing_key
- set +x
- - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME)
+ - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)
- printf -- "${DEB_GPG_KEY}" | gpg --import --batch
- - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME)
+ - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE)

.setup_macos_github_app:
# GitHub App rate-limits are per-app.
# This balances the requests made to GitHub between the two apps we have set up.
- |
if [[ "$(( RANDOM % 2 ))" == "1" ]]; then
- export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME)
- export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME)
- export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME)
+ export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY)
+ export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID)
+ export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID)
echo "Using GitHub App instance 1"
else
- export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME)
- export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME)
- export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME)
+ export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2)
+ export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2)
+ export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2)
echo "Using GitHub App instance 2"
fi

.setup_agent_github_app:
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_KEY_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_APP_ID_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_INSTALLATION_ID_SSM_NAME)
+ - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_KEY)
+ - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP_ID)
+ - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_INSTALLATION_ID)
- echo "Using agent GitHub App"
# Install `dd-pkg` and lint packages produced by Omnibus, supports only deb and rpm packages
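Throughout this diff, `aws_ssm_get_wrapper.sh $SOMETHING_SSM_NAME` becomes `fetch_secret.sh $SOMETHING` (the `_SSM_NAME` suffix is dropped from the variable names in `.gitlab-ci.yml`), and the `source /root/.bashrc` preludes go away. The wrapper's implementation is not part of this diff; a minimal sketch consistent with its call sites — one argument naming the secret, value printed on stdout, non-zero exit on failure — might look like the following, where the region and the SSM backend are assumptions carried over from the old script's name:

```bash
#!/bin/bash
# Hypothetical sketch of tools/ci/fetch_secret.sh -- the real implementation
# is not shown in this diff. Call sites expect: one argument (the parameter
# path, e.g. ci.datadog-agent.datadog_api_key), the decrypted value written
# to stdout, and a non-zero exit code on failure.
set -euo pipefail

parameter_name="$1"
aws ssm get-parameter \
    --region us-east-1 \
    --name "$parameter_name" \
    --with-decryption \
    --query "Parameter.Value" \
    --output text
```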
diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml
index 9a29e393a30ce..d095cd25f8513 100644
--- a/.gitlab/common/test_infra_version.yml
+++ b/.gitlab/common/test_infra_version.yml
@@ -4,4 +4,4 @@ variables:
# and check the job creating the image to make sure you have the right SHA prefix
TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: ""
# Make sure to update test-infra-definitions version in go.mod as well
- TEST_INFRA_DEFINITIONS_BUILDIMAGES: c0ecdf254c23
+ TEST_INFRA_DEFINITIONS_BUILDIMAGES: ce6a4aad9299
diff --git a/.gitlab/container_build/docker_linux.yml b/.gitlab/container_build/docker_linux.yml
index d12b6894b57de..3d93f364b430e 100644
--- a/.gitlab/container_build/docker_linux.yml
+++ b/.gitlab/container_build/docker_linux.yml
@@ -13,8 +13,8 @@
fi
- TARGET_TAG=${IMAGE}${ECR_RELEASE_SUFFIX}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}$TAG_SUFFIX-$ARCH
# DockerHub login for build to limit rate limit when pulling base images
- - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY)
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
+ - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN)
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
# Build image, use target none label to avoid replication
- docker buildx build --no-cache --push --pull --platform linux/$ARCH --build-arg CIBUILD=true --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} $BUILD_ARG --build-arg DD_GIT_REPOSITORY_URL=https://github.com/DataDog/datadog-agent --build-arg DD_GIT_COMMIT_SHA=${CI_COMMIT_SHA} --file $BUILD_CONTEXT/Dockerfile --tag ${TARGET_TAG} --label "org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label "org.opencontainers.image.authors=Datadog " --label "org.opencontainers.image.source=https://github.com/DataDog/datadog-agent" --label "org.opencontainers.image.version=$(inv agent.version)" --label "org.opencontainers.image.revision=${CI_COMMIT_SHA}" --label "org.opencontainers.image.vendor=Datadog, Inc." --label "target=none" $BUILD_CONTEXT
# Squash image
diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml
index 6d7a365b22ee4..af2a6a84bfde6 100644
--- a/.gitlab/container_build/docker_windows.yml
+++ b/.gitlab/container_build/docker_windows.yml
@@ -29,8 +29,8 @@
-e SIGN_WINDOWS_DD_WCS=true
-e CI_PIPELINE_ID=${CI_PIPELINE_ID}
-e CI_PROJECT_NAME=${CI_PROJECT_NAME}
- -e DOCKER_REGISTRY_LOGIN_SSM_KEY=${DOCKER_REGISTRY_LOGIN_SSM_KEY}
- -e DOCKER_REGISTRY_PWD_SSM_KEY=${DOCKER_REGISTRY_PWD_SSM_KEY}
+ -e DOCKER_REGISTRY_LOGIN=${DOCKER_REGISTRY_LOGIN}
+ -e DOCKER_REGISTRY_PWD=${DOCKER_REGISTRY_PWD}
-v "$(Get-Location):C:\mnt"
-v \\.\pipe\docker_engine:\\.\pipe\docker_engine 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_${Env:VARIANT}_x64${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
powershell
diff --git a/.gitlab/container_build/fakeintake.yml b/.gitlab/container_build/fakeintake.yml
index 334d6e73e78ca..92a2f94da565c 100644
--- a/.gitlab/container_build/fakeintake.yml
+++ b/.gitlab/container_build/fakeintake.yml
@@ -15,7 +15,7 @@ docker_build_fakeintake:
BUILD_CONTEXT: .
script:
# DockerHub login for build to limit rate limit when pulling base images
- - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY)
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
+ - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN)
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
- docker buildx build --push --pull --platform ${PLATFORMS} --file ${DOCKERFILE} --tag ${TARGET} $BUILD_CONTEXT
retry: 2
diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml
index b63cd6cc74972..5d3fdac92be9a 100644
--- a/.gitlab/deploy_containers/deploy_containers_a7.yml
+++ b/.gitlab/deploy_containers/deploy_containers_a7.yml
@@ -25,7 +25,6 @@ include:
stage: deploy_containers
dependencies: []
before_script:
- - source /root/.bashrc
- if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi
- export IMG_BASE_SRC="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
- export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64"
@@ -66,7 +65,6 @@ deploy_containers-dogstatsd:
!reference [.manual_on_deploy_auto_on_rc]
dependencies: []
before_script:
- - source /root/.bashrc
- export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"
- export IMG_SOURCES="${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64"
- export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}"
@@ -98,8 +96,6 @@ deploy_containers-ot:
IMG_REGISTRIES: public
VERSION: 7
dependencies: []
- before_script:
- - source /root/.bashrc
parallel:
matrix:
- IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64
diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
index 47b3566f320a1..2d7301d70bca4 100644
--- a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
+++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
@@ -11,7 +11,6 @@ include:
stage: deploy_cws_instrumentation
dependencies: []
before_script:
- - source /root/.bashrc
- if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi
- if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi
- export IMG_BASE_SRC="${SRC_CWS_INSTRUMENTATION}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
diff --git a/.gitlab/deploy_dca/deploy_dca.yml b/.gitlab/deploy_dca/deploy_dca.yml
index 5db81a7297552..5065744f8e315 100644
--- a/.gitlab/deploy_dca/deploy_dca.yml
+++ b/.gitlab/deploy_dca/deploy_dca.yml
@@ -15,7 +15,6 @@ include:
- job: "docker_build_cluster_agent_arm64"
artifacts: false
before_script:
- - source /root/.bashrc
- if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi
- if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi
- export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml
index 0d0cd80b7b981..a35239c948381 100644
--- a/.gitlab/deploy_packages/winget.yml
+++ b/.gitlab/deploy_packages/winget.yml
@@ -10,7 +10,11 @@ publish_winget_7_x64:
variables:
ARCH: "x64"
before_script:
- - $wingetPat=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:WINGET_PAT_SSM_NAME)
+ - $tmpfile = [System.IO.Path]::GetTempFileName()
+ - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:WINGET_PAT" "$tmpfile")
+ - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
+ - $wingetPat=$(cat "$tmpfile")
+ - Remove-Item "$tmpfile"
script:
- '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
- $ErrorActionPreference = "Stop"
diff --git a/.gitlab/deps_fetch/deps_fetch.yml b/.gitlab/deps_fetch/deps_fetch.yml
index 617a272ab2cb3..4fde8af699088 100644
--- a/.gitlab/deps_fetch/deps_fetch.yml
+++ b/.gitlab/deps_fetch/deps_fetch.yml
@@ -40,7 +40,6 @@ go_deps:
# If the cache already contains the dependencies, don't redownload them
# but still provide the artifact that's expected for the other jobs to run
- if [ -f modcache.tar.xz ]; then exit 0; fi
- - source /root/.bashrc
- inv -e deps --verbose
- cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz .
artifacts:
@@ -60,7 +59,6 @@ go_tools_deps:
extends: .cache
script:
- if [ -f modcache_tools.tar.xz ]; then exit 0; fi
- - source /root/.bashrc
- inv -e download-tools
- cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz .
artifacts:
diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml
index 257e8d255a35b..8cb38a1e6f869 100644
--- a/.gitlab/e2e/e2e.yml
+++ b/.gitlab/e2e/e2e.yml
@@ -1,7 +1,6 @@
---
# e2e stage
# Contains test jobs based on the new-e2e tests framework
-
.new_e2e_template:
stage: e2e
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES
@@ -12,19 +11,22 @@
- !reference [.retrieve_linux_go_e2e_deps]
# Setup AWS Credentials
- mkdir -p ~/.aws
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config
- export AWS_PROFILE=agent-qa-ci
# Now all `aws` commands target the agent-qa profile
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_PUBLIC_KEY_RSA_SSM_NAME > $E2E_PUBLIC_KEY_PATH
- - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_RSA_SSM_NAME > $E2E_PRIVATE_KEY_PATH
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH
+ - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH
# Use S3 backend
- pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE"
# Setup Azure credentials. https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config
# The app is called `agent-e2e-tests`
- - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_ID)
- - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_SECRET)
- - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_TENANT_ID)
- - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID)
+ - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_ID)
+ - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET)
+ - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID)
+ - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID)
+ # Setup GCP credentials. https://www.pulumi.com/registry/packages/gcp/installation-configuration/
+ # The service account is called `agent-e2e-tests`
+ - export GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS)
# Generate external links to CI VISIBILITY, used by artifacts:reports:annotations
- inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH
variables:
@@ -158,6 +160,20 @@ new-e2e-windows-service-test:
TEAM: windows-agent
EXTRA_PARAMS: --run TestServiceBehavior

+# Temporary job for hunting a crash
+new-e2e-windows-service-test-nofim:
+ extends: .new_e2e_template
+ needs:
+ - !reference [.needs_new_e2e_template]
+ - deploy_windows_testing-a7
+ rules:
+ - !reference [.on_windows_service_or_e2e_changes]
+ - !reference [.manual]
+ variables:
+ TARGETS: ./tests/windows/service-test
+ TEAM: windows-agent
+ EXTRA_PARAMS: --run TestNoFIMServiceBehavior
+
new-e2e-language-detection:
extends: .new_e2e_template_needs_deb_x64
rules:
@@ -321,6 +337,7 @@ new-e2e-installer:
TARGETS: ./tests/installer
TEAM: fleet
FLEET_INSTALL_METHOD: "install_script"
+ allow_failure: true # incident-30484

new-e2e-installer-ansible:
extends: .new_e2e_template
@@ -343,6 +360,7 @@ new-e2e-installer-ansible:
TARGETS: ./tests/installer
TEAM: fleet
FLEET_INSTALL_METHOD: "ansible"
+ allow_failure: true # incident-30484

new-e2e-ndm-netflow:
extends: .new_e2e_template
@@ -455,7 +473,7 @@ generate-flakes-finder-pipeline:
- qa_agent
tags: ["arch:amd64"]
script:
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN)
- inv -e testwasher.generate-flake-finder-pipeline
artifacts:
paths:
diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml
index 90777eb5ec269..d1457ff8f7e20 100644
--- a/.gitlab/e2e_install_packages/common.yml
+++ b/.gitlab/e2e_install_packages/common.yml
@@ -33,7 +33,7 @@
- START_MAJOR_VERSION: [5, 6]
END_MAJOR_VERSION: [6]
script:
- - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME)
+ - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY)
- inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION

.new-e2e_script_upgrade7:
@@ -47,7 +47,7 @@
- START_MAJOR_VERSION: [5, 6, 7]
END_MAJOR_VERSION: [7]
script:
- - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME )
+ - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY )
- inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION

.new-e2e_rpm:
@@ -57,5 +57,5 @@
TEAM: agent-delivery
EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH
script:
- - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME)
+ - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY)
- inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS}
diff --git a/.gitlab/e2e_install_packages/windows.yml b/.gitlab/e2e_install_packages/windows.yml
index ccf453543998c..70603614abca2 100644
--- a/.gitlab/e2e_install_packages/windows.yml
+++ b/.gitlab/e2e_install_packages/windows.yml
@@ -39,6 +39,7 @@
- E2E_MSI_TEST: TestInstall
- E2E_MSI_TEST: TestRepair
- E2E_MSI_TEST: TestUpgrade
+ - E2E_MSI_TEST: TestUpgradeRollback
- E2E_MSI_TEST: TestUpgradeRollbackWithoutCWS
- E2E_MSI_TEST: TestUpgradeChangeUser
- E2E_MSI_TEST: TestUpgradeFromV5
@@ -61,6 +62,7 @@
- E2E_MSI_TEST: TestInstall
- E2E_MSI_TEST: TestRepair
- E2E_MSI_TEST: TestUpgrade
+ - E2E_MSI_TEST: TestUpgradeRollback
- E2E_MSI_TEST: TestUpgradeRollbackWithoutCWS
- E2E_MSI_TEST: TestUpgradeChangeUser
- E2E_MSI_TEST: TestUpgradeFromV5
diff --git a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml
index 2c3faca866165..606c4e6b1bc9f 100644
--- a/.gitlab/e2e_k8s/e2e_k8s.yml
+++ b/.gitlab/e2e_k8s/e2e_k8s.yml
@@ -11,16 +11,16 @@
variables:
LANG: C.UTF-8
before_script:
- - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY)
- - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_DDDEV_SSM_NAME)
+ - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN)
+ - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_DDDEV)

.k8s-e2e-cws-cspm-init:
- set +x
- export DATADOG_AGENT_SITE=datadoghq.com
- - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_API_KEY_SSM_NAME)
- - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_APP_KEY_SSM_NAME)
- - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_RC_KEY_SSM_NAME)
+ - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_API_KEY)
+ - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_APP_KEY)
+ - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_RC_KEY)

.k8s_e2e_template_needs_dev:
extends: .k8s_e2e_template
@@ -68,15 +68,3 @@ k8s-e2e-cspm-main:
retry: 1
variables:
ARGO_WORKFLOW: cspm
-
-k8s-e2e-otlp-dev:
- extends: .k8s_e2e_template_dev
- rules: !reference [.on_dev_branch_manual]
- variables:
- ARGO_WORKFLOW: otlp
-
-k8s-e2e-otlp-main:
- extends: .k8s_e2e_template_main
- rules: !reference [.on_main]
- variables:
- ARGO_WORKFLOW: otlp
diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml
index 3819ff4626d4d..51ef2cc71dcb8 100644
--- a/.gitlab/functional_test/regression_detector.yml
+++ b/.gitlab/functional_test/regression_detector.yml
@@ -42,12 +42,12 @@ single-machine-performance-regression_detector:
- echo "Merge base is ${SMP_MERGE_BASE}"
# Setup AWS credentials for single-machine-performance AWS account
- AWS_NAMED_PROFILE="single-machine-performance"
- - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_ACCOUNT_ID_SSM_NAME)
+ - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID)
- SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com
- - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_AGENT_TEAM_ID_SSM_NAME)
- - SMP_API=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_API_SSM_NAME)
- - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_ID_SSM_NAME) --profile ${AWS_NAMED_PROFILE}
- - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_SSM_NAME) --profile ${AWS_NAMED_PROFILE}
+ - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID)
+ - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API)
+ - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) --profile ${AWS_NAMED_PROFILE}
+ - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) --profile ${AWS_NAMED_PROFILE}
- aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE}
# Download smp binary and prepare it for use
- aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp
diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml
index 45f93afe64309..1c24c0ebe9401 100644
--- a/.gitlab/install_script_testing/install_script_testing.yml
+++ b/.gitlab/install_script_testing/install_script_testing.yml
@@ -4,9 +4,8 @@ test_install_script:
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
script:
- - source /root/.bashrc
- set +x
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET
- export TESTING_YUM_URL=$RPM_TESTING_S3_BUCKET
- export TEST_PIPELINE_ID=$CI_PIPELINE_ID
diff --git a/.gitlab/integration_test/dogstatsd.yml b/.gitlab/integration_test/dogstatsd.yml
index 5e5484df1024f..ab1862d716dae 100644
--- a/.gitlab/integration_test/dogstatsd.yml
+++ b/.gitlab/integration_test/dogstatsd.yml
@@ -4,14 +4,10 @@

dogstatsd_x64_size_test:
stage: integration_test
- rules:
- - !reference [.except_mergequeue]
- - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
needs: ["build_dogstatsd_static-binary_x64"]
before_script:
- - source /root/.bashrc
- mkdir -p $STATIC_BINARIES_DIR
- $S3_CP_CMD $S3_ARTIFACTS_URI/static/dogstatsd.amd64 $STATIC_BINARIES_DIR/dogstatsd
script:
diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml
index a68f86ab8a16a..127454f00688f 100644
--- a/.gitlab/integration_test/windows.yml
+++ b/.gitlab/integration_test/windows.yml
@@ -7,7 +7,11 @@
needs: ["go_deps", "go_tools_deps"]
tags: ["runner:windows-docker", "windowsversion:1809"]
before_script:
- - $vcpkgBlobSaSUrl=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:VCPKG_BLOB_SAS_URL_SSM_NAME)
+ - $tmpfile = [System.IO.Path]::GetTempFileName()
+ - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:VCPKG_BLOB_SAS_URL" "$tmpfile")
+ - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
+ - $vcpkgBlobSaSUrl=$(cat "$tmpfile")
+ - Remove-Item "$tmpfile"
script:
- $ErrorActionPreference = "Stop"
- '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml
index f4cb34de1588a..a39917217f5ae 100644
--- a/.gitlab/internal_image_deploy/internal_image_deploy.yml
+++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml
@@ -22,8 +22,7 @@ docker_trigger_internal:
TMPL_SRC_REPO: ci/datadog-agent/agent
RELEASE_STAGING: "true"
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi
- |
if [ "$BUCKET_BRANCH" = "nightly" ]; then
@@ -69,7 +68,7 @@ docker_trigger_internal-ot:
RELEASE_STAGING: "true"
script:
- source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi
- |
if [ "$BUCKET_BRANCH" = "nightly" ]; then
@@ -115,8 +114,7 @@ docker_trigger_cluster_agent_internal:
RELEASE_STAGING: "true"
RELEASE_PROD: "true"
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi
- |
if [ "$BUCKET_BRANCH" = "nightly" ]; then
@@ -162,8 +160,7 @@ docker_trigger_cws_instrumentation_internal:
RELEASE_STAGING: "true"
RELEASE_PROD: "true"
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi
- |
if [ "$BUCKET_BRANCH" = "nightly" ]; then
diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
index 0ecef941fe652..3c9f414fae51c 100644
--- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
+++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
@@ -36,8 +36,7 @@ internal_kubernetes_deploy_experimental:
EXPLICIT_WORKFLOWS: "//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish"
BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main
--variable OPTION_AUTOMATIC_ROLLOUT
--variable EXPLICIT_WORKFLOWS
diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml
index d3069ea0b320f..179e1b64cbcd5 100644
--- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml
+++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml
@@ -22,8 +22,7 @@ rc_kubernetes_deploy:
EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc"
AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main
--variable OPTION_AUTOMATIC_ROLLOUT
--variable EXPLICIT_WORKFLOWS
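The deploy jobs above (and the `docker_trigger_*` jobs earlier) share one trigger idiom: export a `GITLAB_TOKEN`, then let the `pipeline.trigger-child-pipeline` invoke task forward selected variables into the downstream pipeline. Based on how the jobs use it, each bare `--variable NAME` appears to forward that variable from the current job environment by name (an inference from usage, not a documented contract):

```bash
# Usage pattern shared by the trigger jobs above (illustrative).
export GITLAB_TOKEN=$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$GITLAB_SCHEDULER_TOKEN")
inv pipeline.trigger-child-pipeline \
    --project-name "DataDog/k8s-datadog-agent-ops" \
    --git-ref main \
    --variable OPTION_AUTOMATIC_ROLLOUT \
    --variable EXPLICIT_WORKFLOWS
```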
diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml
index a75a9b9b8b04e..1dce667e038ab 100644
--- a/.gitlab/kernel_matrix_testing/common.yml
+++ b/.gitlab/kernel_matrix_testing/common.yml
@@ -29,7 +29,7 @@
.write_ssh_key_file:
- touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_SSM_NAME > $AWS_EC2_SSH_KEY_FILE
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE
# Without the newline ssh silently fails and moves on to try other auth methods
- echo "" >> $AWS_EC2_SSH_KEY_FILE
- chmod 600 $AWS_EC2_SSH_KEY_FILE
@@ -47,7 +47,7 @@
.kmt_new_profile:
- mkdir -p ~/.aws
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config
- export AWS_PROFILE=agent-qa-ci
.define_if_collect_complexity:
@@ -60,7 +60,7 @@
- echo "COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY}"
.collect_outcomes_kmt:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- export MICRO_VM_IP=$(jq --exit-status --arg TAG $TAG --arg ARCH $ARCH --arg TEST_SET $TEST_SET -r '.[$ARCH].microvms | map(select(."vmset-tags"| index($TEST_SET))) | map(select(.tag==$TAG)) | .[].ip' $CI_PROJECT_DIR/stack.output)
# Collect setup-ddvm systemd service logs
- mkdir -p $CI_PROJECT_DIR/logs
@@ -114,7 +114,7 @@
scp $DD_AGENT_TESTING_DIR/kmt-dockers-$ARCH.tar.gz metal_instance:/opt/kernel-version-testing
fi
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
variables:
AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key
@@ -143,7 +143,7 @@
KUBERNETES_MEMORY_LIMIT: "16Gi"
VMCONFIG_FILE: "${CI_PROJECT_DIR}/vmconfig-${CI_PIPELINE_ID}-${ARCH}.json"
before_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.retrieve_linux_go_deps]
- !reference [.kmt_new_profile]
- !reference [.write_ssh_key_file]
@@ -157,7 +157,7 @@
- jq "." $CI_PROJECT_DIR/stack.output
- pulumi logout
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- export AWS_PROFILE=agent-qa-ci
- !reference [.shared_filters_and_queries]
- mkdir -p $CI_PROJECT_DIR/libvirt/log/$ARCH $CI_PROJECT_DIR/libvirt/xml $CI_PROJECT_DIR/libvirt/qemu $CI_PROJECT_DIR/libvirt/dnsmasq
@@ -182,7 +182,7 @@
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES
tags: ["arch:amd64"]
before_script:
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- !reference [.kmt_new_profile]
script:
- !reference [.shared_filters_and_queries]
@@ -199,7 +199,7 @@
aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}"
fi
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
# Manual cleanup jobs; these are used to clean up the instances after the tests
@@ -228,7 +228,7 @@
RETRY: 2
EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json
before_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.kmt_new_profile]
- !reference [.write_ssh_key_file]
- echo "CI_JOB_URL=${CI_JOB_URL}" >> $DD_AGENT_TESTING_DIR/job_env.txt
@@ -315,9 +315,9 @@ notify_ebpf_complexity_changes:
- python3 -m pip install tabulate # Required for printing the tables
- python3 -m pip install -r tasks/libs/requirements-github.txt
- |
- export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64)
- export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME)
- export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME)
- export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME)
+ export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64)
+ export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID)
+ export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID)
+ export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN)
script:
- inv -e ebpf.generate-complexity-summary-for-pr
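`notify_ebpf_complexity_changes` exports the GitHub App credential trio used by the PR commenter. The private key is piped through `base64` at export time so the multi-line PEM travels safely in an environment variable; whatever consumes it must decode it again. A sketch of that consuming side (assumed; the token-minting command is a placeholder, not a real CLI from this repo):

```bash
# Illustrative consumer of the exported credentials. Only the base64
# decode step is implied by the `| base64` above; the final command is
# a placeholder.
key_file=$(mktemp)
trap 'rm -f "$key_file"' EXIT

echo "$GITHUB_KEY_B64" | base64 -d > "$key_file"

mint-github-app-token \
    --app-id "$GITHUB_APP_ID" \
    --installation-id "$GITHUB_INSTALLATION_ID" \
    --key-file "$key_file"
```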
diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml b/.gitlab/kernel_matrix_testing/security_agent.yml
index 8df130eb83782..e70c923fcf24d 100644
--- a/.gitlab/kernel_matrix_testing/security_agent.yml
+++ b/.gitlab/kernel_matrix_testing/security_agent.yml
@@ -72,7 +72,7 @@ kmt_setup_env_secagent_x64:
# upload connector to metal instance
- scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
variables:
AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key
diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml
index f871f2aca10a1..f01de83cc7116 100644
--- a/.gitlab/kernel_matrix_testing/system_probe.yml
+++ b/.gitlab/kernel_matrix_testing/system_probe.yml
@@ -28,13 +28,13 @@ upload_dependencies_sysprobe_arm64:
stage: kernel_matrix_testing_prepare
script:
# DockerHub login for the build, to reduce rate limiting when pulling base images
- - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY)
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
+ - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN)
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL"
# Pull base images
- mkdir $KMT_DOCKERS
- inv -e system-probe.save-test-dockers --use-crane --output-dir $KMT_DOCKERS --arch $ARCH
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
artifacts:
expire_in: 1 day
@@ -81,7 +81,7 @@ pull_test_dockers_arm64:
- !reference [.setup_ssh_config]
- scp $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/$ARCHIVE_NAME metal_instance:/opt/kernel-version-testing/
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
variables:
DEPENDENCIES: $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/btfs
@@ -160,7 +160,7 @@ kmt_setup_env_sysprobe_x64:
# upload connector to metal instance
- scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector
after_script:
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- !reference [.tag_kmt_ci_job]
variables:
AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key
diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml
index 4184c08c53f8b..1fd79c1796e7d 100644
--- a/.gitlab/kitchen_deploy/kitchen_deploy.yml
+++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml
@@ -3,13 +3,13 @@
# Contains jobs which deploy Agent packages to testing repositories that are used in kitchen tests.
.setup_rpm_signing_key: &setup_rpm_signing_key
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
+ - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)
- printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
+ - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE)
.setup_apt_signing_key: &setup_apt_signing_key
- - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME)
- - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME)
+ - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)
+ - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE)
- printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch
@@ -40,7 +40,6 @@
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
deploy_deb_testing-a6_x64:
@@ -85,7 +84,6 @@ deploy_deb_testing-a6_arm64:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
deploy_deb_testing-a7_x64:
@@ -139,7 +137,6 @@ deploy_deb_testing-a7_arm64:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
deploy_rpm_testing-a6_x64:
@@ -177,7 +174,6 @@ deploy_rpm_testing-a6_arm64:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
deploy_rpm_testing-a7_x64:
@@ -226,7 +222,6 @@ deploy_suse_rpm_testing_x64-a6:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR_SUSE
script:
- *setup_rpm_signing_key
@@ -252,7 +247,6 @@ deploy_suse_rpm_testing_x64-a7:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR_SUSE
script:
- *setup_rpm_signing_key
@@ -271,7 +265,6 @@ deploy_suse_rpm_testing_arm64-a7:
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR_SUSE
script:
- *setup_rpm_signing_key
@@ -288,7 +281,6 @@ deploy_windows_testing-a6:
tags: ["arch:amd64"]
needs: ["lint_windows-x64", "windows_msi_x64-a6"]
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
script:
- $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-6.*.msi" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A6 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
@@ -304,7 +296,6 @@ deploy_windows_testing-a7:
needs:
["lint_windows-x64", "windows_msi_and_bosh_zip_x64-a7", "windows-installer-amd64"]
before_script:
- - source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
script:
- $S3_CP_CMD
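One detail in the `*setup_rpm_signing_key` / `*setup_apt_signing_key` anchors at the top of this file deserves a note: the keys are fed to GPG via `printf --` rather than `echo`. A behavioral gloss, not new repo code:

```bash
# Why `printf -- "$KEY" | gpg --import --batch` instead of echo:
#  * `--` ends option parsing, so key material that happens to begin
#    with `-` cannot be misread as a printf flag;
#  * passing the key as printf's format string is safe here because
#    ASCII-armored GPG keys contain no `%` or `\` sequences for printf
#    to expand;
#  * `--batch` keeps gpg from prompting inside a non-interactive CI job.
printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch
```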
diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml
index 43f8bded7ae2f..67a169f4dce8b 100644
--- a/.gitlab/maintenance_jobs/docker.yml
+++ b/.gitlab/maintenance_jobs/docker.yml
@@ -60,8 +60,8 @@ delete_docker_tag:
TAG: "" # tag name, for example "6.9.0"
ORGANIZATION: "datadog"
before_script:
- - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY)
- - PASS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY)
+ - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN)
+ - PASS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD)
- python3 -m pip install -r requirements.txt
- |
export DOCKER_TOKEN=`curl -s -H "Content-Type: application/json" -X POST -d '{"username": "'$DOCKER_REGISTRY_LOGIN'", "password": "'$PASS'"}' https://hub.docker.com/v2/users/login/ | python -c 'import sys, json; print(json.load(sys.stdin)["token"].strip())'`
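The token extraction above shells out to an inline Python one-liner to pull `.token` from the Docker Hub login response. An equivalent with `jq`, shown for readability (the job image may simply not ship `jq`, which would explain the Python approach):

```bash
# Illustrative alternative: extract the token with jq instead of
# inline Python. Same Docker Hub login endpoint and JSON shape.
DOCKER_TOKEN=$(curl -s -H "Content-Type: application/json" -X POST \
    -d '{"username": "'"$DOCKER_REGISTRY_LOGIN"'", "password": "'"$PASS"'"}' \
    https://hub.docker.com/v2/users/login/ | jq -r '.token')
export DOCKER_TOKEN
```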
diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml
index b37355076f395..56cd45ef1fa9c 100644
--- a/.gitlab/maintenance_jobs/kitchen.yml
+++ b/.gitlab/maintenance_jobs/kitchen.yml
@@ -26,10 +26,10 @@ periodic_kitchen_cleanup_azure:
# the job to be run one at a time.
resource_group: azure_cleanup
script:
- - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME`
- - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME`
- - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME`
- - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME`
+ - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID`
+ - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID`
+ - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET`
+ - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID`
# Remove kitchen resources for all existing test suite prefixes
- RESOURCE_GROUP_PREFIX=kitchen-chef python3 /deploy_scripts/cleanup_azure.py
- RESOURCE_GROUP_PREFIX=kitchen-win python3 /deploy_scripts/cleanup_azure.py
diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml
index 7c6e8aa580159..1f6f08ba9c6a3 100644
--- a/.gitlab/notify/notify.yml
+++ b/.gitlab/notify/notify.yml
@@ -25,8 +25,8 @@ notify:
resource_group: notification
timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above
script:
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt
- |
# Do not send notifications if this is a child pipeline of another repo
@@ -53,9 +53,8 @@ send_pipeline_stats:
when: always
dependencies: []
script:
- - source /root/.bashrc
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- invoke -e notify.send-stats
notify_github:
@@ -79,7 +78,6 @@ notify_github:
dependencies: []
allow_failure: true
script:
- - source /root/.bashrc
- !reference [.install_pr_commenter]
- messagefile="$(mktemp)"
- echo "Use this command from [test-infra-definitions](https://github.com/DataDog/test-infra-definitions) to manually test this PR changes on a VM:" >> "$messagefile"
@@ -112,10 +110,10 @@ notify_gitlab_ci_changes:
- source /root/.bashrc
- python3 -m pip install -r tasks/libs/requirements-github.txt
- |
- export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64)
- export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME)
- export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME)
- export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME)
+ export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64)
+ export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID)
+ export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID)
+ export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN)
- inv -e notify.gitlab-ci-diff --pr-comment
.failure_summary_job:
@@ -127,10 +125,9 @@ notify_gitlab_ci_changes:
timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above
.failure_summary_setup:
- - source /root/.bashrc
- - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN_SSM_NAME)
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt
# Upload failure summary data to S3 at the end of each main pipeline
diff --git a/.gitlab/package_build/dmg.yml b/.gitlab/package_build/dmg.yml
index cf07e07415e7d..02d0b830cc910 100644
--- a/.gitlab/package_build/dmg.yml
+++ b/.gitlab/package_build/dmg.yml
@@ -34,5 +34,4 @@ agent_dmg-x64-a7:
PYTHON_RUNTIMES: "3"
timeout: 6h
before_script:
- - source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_7
diff --git a/.gitlab/package_build/heroku.yml b/.gitlab/package_build/heroku.yml
index 337bc54932f56..c036313447986 100644
--- a/.gitlab/package_build/heroku.yml
+++ b/.gitlab/package_build/heroku.yml
@@ -14,7 +14,6 @@
"generate_minimized_btfs_x64",
]
script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
- !reference [.cache_omnibus_ruby_deps, setup]
- echo "About to build for $RELEASE_VERSION"
diff --git a/.gitlab/package_build/installer.yml b/.gitlab/package_build/installer.yml
index ac335677c8bad..67b3ad894369d 100644
--- a/.gitlab/package_build/installer.yml
+++ b/.gitlab/package_build/installer.yml
@@ -55,7 +55,6 @@ datadog-agent-oci-x64-a7:
PACKAGE_ARCH: amd64
DESTINATION_OCI: "datadog-agent-7-remote-updater-amd64.tar.xz"
before_script:
- - source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_7
datadog-agent-oci-arm64-a7:
@@ -79,7 +78,6 @@ datadog-agent-oci-arm64-a7:
PACKAGE_ARCH: arm64
DESTINATION_OCI: "datadog-agent-7-remote-updater-arm64.tar.xz"
before_script:
- - source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_7
#
@@ -87,7 +85,6 @@ datadog-agent-oci-arm64-a7:
#
.installer_build_common:
script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
- !reference [.cache_omnibus_ruby_deps, setup]
- echo "About to build for $RELEASE_VERSION"
@@ -146,7 +143,6 @@ installer-amd64-oci:
variables:
DESTINATION_FILE: "datadog-updater_7-amd64-oci.tar.xz"
before_script:
- - source /root/.bashrc
- export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1
- export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR"
@@ -155,7 +151,6 @@ installer-arm64-oci:
variables:
DESTINATION_FILE: "datadog-updater_7-arm64-oci.tar.xz"
before_script:
- - source /root/.bashrc
- export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1
- export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR"
@@ -188,7 +183,7 @@ windows-installer-amd64:
-e SIGN_WINDOWS_DD_WCS=true
-e S3_OMNIBUS_CACHE_BUCKET="$S3_OMNIBUS_CACHE_BUCKET"
-e USE_S3_CACHING="$USE_S3_CACHING"
- -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME}
+ -e API_KEY_ORG2=${API_KEY_ORG2}
486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
c:\mnt\tasks\winbuildscripts\buildinstaller.bat
after_script:
diff --git a/.gitlab/package_build/linux.yml b/.gitlab/package_build/linux.yml
index db66682e4d356..04aafb966b0e7 100644
--- a/.gitlab/package_build/linux.yml
+++ b/.gitlab/package_build/linux.yml
@@ -4,7 +4,6 @@
- when: on_success
stage: package_build
script:
- - source /root/.bashrc
- echo "About to build for $RELEASE_VERSION"
- !reference [.retrieve_linux_go_deps]
- !reference [.cache_omnibus_ruby_deps, setup]
@@ -110,7 +109,6 @@ datadog-ot-agent-7-arm64:
extends: .agent_build_common
needs: ["go_mod_tidy_check", "go_deps"]
script:
- - source /root/.bashrc
- echo "About to build for $RELEASE_VERSION"
- !reference [.retrieve_linux_go_deps]
- !reference [.cache_omnibus_ruby_deps, setup]
@@ -149,7 +147,6 @@ iot-agent-armhf:
- when: on_success
stage: package_build
script:
- - source /root/.bashrc
- echo "About to build for $RELEASE_VERSION"
- !reference [.retrieve_linux_go_deps]
- !reference [.cache_omnibus_ruby_deps, setup]
diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml
index 2d45a7b0ceecd..b54b3c33eb677 100644
--- a/.gitlab/package_build/windows.yml
+++ b/.gitlab/package_build/windows.yml
@@ -36,7 +36,7 @@
-e GO_VERSION_CHECK="true"
-e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG}
-e PIP_INDEX_URL=${PIP_INDEX_URL}
- -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME}
+ -e API_KEY_ORG2=${API_KEY_ORG2}
486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
c:\mnt\tasks\winbuildscripts\buildwin.bat
- If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
@@ -123,7 +123,7 @@ windows_zip_agent_binaries_x64-a7:
-e USE_S3_CACHING="$USE_S3_CACHING"
-e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG}
-e PIP_INDEX_URL=${PIP_INDEX_URL}
- -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME}
+ -e API_KEY_ORG2=${API_KEY_ORG2}
486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
c:\mnt\tasks\winbuildscripts\buildwin.bat
- If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
diff --git a/.gitlab/packaging/deb.yml b/.gitlab/packaging/deb.yml
index cd136706ff1e6..08a02809ed244 100644
--- a/.gitlab/packaging/deb.yml
+++ b/.gitlab/packaging/deb.yml
@@ -2,7 +2,6 @@
.package_deb_common:
stage: packaging
script:
- - source /root/.bashrc
- !reference [.cache_omnibus_ruby_deps, setup]
- echo "About to package for $RELEASE_VERSION"
- !reference [.setup_deb_signing_key]
@@ -88,7 +87,6 @@ agent_deb-arm64-a7:
.package_ot_deb_common:
extends: [.package_deb_common]
script:
- - source /root/.bashrc
- !reference [.cache_omnibus_ruby_deps, setup]
- echo "About to package for $RELEASE_VERSION"
- !reference [.setup_deb_signing_key]
@@ -143,7 +141,6 @@ installer_deb-arm64:
- when: on_success
stage: packaging
script:
- - source /root/.bashrc
- !reference [.cache_omnibus_ruby_deps, setup]
- echo "About to package for $RELEASE_VERSION"
- !reference [.setup_deb_signing_key]
diff --git a/.gitlab/packaging/oci.yml b/.gitlab/packaging/oci.yml
index 4598fa8050336..b2dcd8eaf740a 100644
--- a/.gitlab/packaging/oci.yml
+++ b/.gitlab/packaging/oci.yml
@@ -6,7 +6,6 @@
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
before_script:
- - source /root/.bashrc
- export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1
- export INSTALL_DIR=/opt/datadog-packages/${OCI_PRODUCT}/${PACKAGE_VERSION}
variables:
diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml
index f337dc8124924..d03aa99f9212f 100644
--- a/.gitlab/packaging/rpm.yml
+++ b/.gitlab/packaging/rpm.yml
@@ -5,13 +5,12 @@
- !reference [.except_mergequeue]
- when: on_success
before_script:
- - source /root/.bashrc
script:
- echo "About to build for $RELEASE_VERSION"
- !reference [.cache_omnibus_ruby_deps, setup]
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
+ - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)
- printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
+ - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE)
- inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --target-project=${DD_PROJECT} ${OMNIBUS_EXTRA_ARGS}
- ls -la $OMNIBUS_PACKAGE_DIR/
- !reference [.lint_linux_packages]
@@ -136,12 +135,11 @@ installer_suse_rpm-arm64:
- !reference [.except_mergequeue]
- when: on_success
script:
- - source /root/.bashrc
- echo "About to build for $RELEASE_VERSION"
- !reference [.cache_omnibus_ruby_deps, setup]
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
+ - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)
- printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
+ - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE)
- inv -e omnibus.build --release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --flavor=iot ${OMNIBUS_EXTRA_ARGS}
- ls -la $OMNIBUS_PACKAGE_DIR/
- !reference [.lint_linux_packages]
diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml
index 8ff246b8c2667..94a48c2fe1004 100644
--- a/.gitlab/pkg_metrics/pkg_metrics.yml
+++ b/.gitlab/pkg_metrics/pkg_metrics.yml
@@ -56,10 +56,8 @@ send_pkg_size:
- job: iot_agent_suse-x64
optional: true
script:
- - source /root/.bashrc
-
# Get API key to send metrics
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
# Allow failures: some packages are not always built, and therefore stats cannot be sent for them
- set +e
@@ -107,12 +105,9 @@ send_pkg_size:
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
script:
- - source /root/.bashrc
-
- ls -l $OMNIBUS_PACKAGE_DIR
- if [[ "${ARCH}" == "amd64" ]]; then ls -l $OMNIBUS_PACKAGE_DIR_SUSE; fi
- - source /root/.bashrc
- export failures=0
- export last_stable=$(inv release.get-release-json-value "last_stable::${MAJOR_VERSION}")
# Get stable packages from S3 buckets, send new package sizes & compare stable and new package sizes
diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml
index 00efc95005fa5..8cfab2abbd124 100644
--- a/.gitlab/post_rc_build/post_rc_tasks.yml
+++ b/.gitlab/post_rc_build/post_rc_tasks.yml
@@ -11,8 +11,7 @@ update_rc_build_links:
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
script:
- - source /root/.bashrc
- - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN_SSM_NAME)
+ - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $JIRA_READ_API_TOKEN)
- export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com
- python3 -m pip install -r tasks/requirements_release_tasks.txt
- PATCH=$(echo "$CI_COMMIT_REF_NAME" | cut -d'.' -f3 | cut -c1)
diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml
index 3ef2bdd4dfeb0..7649437386917 100644
--- a/.gitlab/setup/setup.yml
+++ b/.gitlab/setup/setup.yml
@@ -4,7 +4,6 @@ setup_agent_version:
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
script:
- - source /root/.bashrc
- inv -e agent.version --cache-version
- $S3_CP_CMD $CI_PROJECT_DIR/agent-version.cache $S3_ARTIFACTS_URI/agent-version.cache
needs: []
@@ -17,18 +16,17 @@ github_rate_limit_info:
- !reference [.except_mergequeue]
- when: on_success
script:
- - source /root/.bashrc
- python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client
# Send stats for app 1
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY)
+ - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID)
+ - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 1
# Send stats for app 2
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME)
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2)
+ - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2)
+ - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 2
allow_failure: true
diff --git a/.gitlab/source_test/ebpf.yml b/.gitlab/source_test/ebpf.yml
index 11dfc05354333..012406f1b85aa 100644
--- a/.gitlab/source_test/ebpf.yml
+++ b/.gitlab/source_test/ebpf.yml
@@ -16,7 +16,6 @@
before_script:
- !reference [.retrieve_linux_go_deps]
- !reference [.retrieve_linux_go_tools_deps]
- - source /root/.bashrc
script:
- inv -e install-tools
- inv -e system-probe.object-files
@@ -53,7 +52,6 @@ tests_ebpf_arm64:
paths:
- $CI_PROJECT_DIR/kmt-deps
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
- !reference [.retrieve_linux_go_tools_deps]
- inv -e install-tools
@@ -87,7 +85,6 @@ prepare_sysprobe_ebpf_functional_tests_x64:
- $CI_PROJECT_DIR/kmt-deps
- $DD_AGENT_TESTING_DIR/site-cookbooks/dd-security-agent-check/files
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
- !reference [.retrieve_linux_go_tools_deps]
- inv -e install-tools
diff --git a/.gitlab/source_test/go_generate_check.yml b/.gitlab/source_test/go_generate_check.yml
index ad4af5b2ee47c..51d3293a17370 100644
--- a/.gitlab/source_test/go_generate_check.yml
+++ b/.gitlab/source_test/go_generate_check.yml
@@ -10,7 +10,6 @@ security_go_generate_check:
before_script:
- !reference [.retrieve_linux_go_deps]
- !reference [.retrieve_linux_go_tools_deps]
- - source /root/.bashrc
- pip3 install wheel
- pip3 install -r docs/cloud-workload-security/scripts/requirements-docs.txt
- inv -e install-tools
diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml
index 5a01ac2d74a13..491a99cd520f2 100644
--- a/.gitlab/source_test/golang_deps_diff.yml
+++ b/.gitlab/source_test/golang_deps_diff.yml
@@ -12,11 +12,10 @@ golang_deps_diff:
variables:
KUBERNETES_CPU_REQUEST: 4
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
# Get API key to send metrics
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- inv -e diff.go-deps --report-file=deps-report.md --report-metrics --git-ref "${CI_COMMIT_REF_NAME}"
artifacts:
paths:
@@ -32,6 +31,9 @@ golang_deps_commenter:
- !reference [.except_deploy]
- when: on_success
needs: ["golang_deps_diff"]
+ variables:
+ # Not using the entrypoint script for the pr-commenter image
+ FF_KUBERNETES_HONOR_ENTRYPOINT: false
script: # ignore error message about no PR, because it happens for dev branches without PRs
- echo "${CI_COMMIT_REF_NAME}"
- |
@@ -59,11 +61,10 @@ golang_deps_send_count_metrics:
- when: on_success
needs: ["go_deps"]
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
# Get API key to send metrics
- - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME)
+ - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2)
- inv -e go-deps.send-count-metrics --git-sha "${CI_COMMIT_SHA}" --git-ref "${CI_COMMIT_REF_NAME}"
golang_deps_test:
@@ -74,7 +75,6 @@ golang_deps_test:
- when: on_success
needs: ["go_deps"]
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv -e go-deps.test-list
diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml
index c4d57130833a5..4b49e23974b35 100644
--- a/.gitlab/source_test/linux.yml
+++ b/.gitlab/source_test/linux.yml
@@ -51,7 +51,7 @@
.upload_coverage:
# Upload coverage files to Codecov. Never fail on coverage upload.
- source /root/.bashrc
- - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $CODECOV_TOKEN_SSM_NAME)
+ - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN)
- inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true
.linux_lint:
@@ -65,7 +65,6 @@
script:
- !reference [.retrieve_linux_go_deps]
- !reference [.retrieve_linux_go_tools_deps]
- - source /root/.bashrc && conda activate ddpy3
- inv -e rtloader.make --install-prefix=$CI_PROJECT_DIR/dev --python-runtimes "3"
- inv -e rtloader.install
- inv -e install-tools
@@ -252,7 +251,6 @@ go_mod_tidy_check:
extends: .linux_x64
needs: ["go_deps"]
before_script:
- - source /root/.bashrc
- !reference [.retrieve_linux_go_deps]
script:
- inv -e check-mod-tidy
@@ -269,7 +267,7 @@ new-e2e-unit-tests:
before_script:
# Setup AWS Credentials
- mkdir -p ~/.aws
- - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config
- export AWS_PROFILE=agent-qa-ci
# Use S3 backend
- pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE"
@@ -282,4 +280,6 @@ new-e2e-unit-tests:
KUBERNETES_MEMORY_REQUEST: 12Gi
KUBERNETES_MEMORY_LIMIT: 16Gi
KUBERNETES_CPU_REQUEST: 6
+ # Not using the entrypoint script for the e2e runner image
+ FF_KUBERNETES_HONOR_ENTRYPOINT: false
timeout: 10m
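Both `.kmt_new_profile` earlier and `new-e2e-unit-tests` here materialize AWS credentials by appending the fetched `$AGENT_QA_PROFILE` secret to `~/.aws/config` and then selecting `AWS_PROFILE=agent-qa-ci`. The secret's contents are not in this diff; for the pattern to work it must append a named-profile block roughly like the following (every value below is a placeholder, not the real CI configuration):

```bash
# Illustrative only: the shape the fetched secret must have for
# `export AWS_PROFILE=agent-qa-ci` to resolve.
cat >> ~/.aws/config <<'EOF'
[profile agent-qa-ci]
region = us-east-1
role_arn = arn:aws:iam::123456789012:role/agent-qa-ci
credential_source = Ec2InstanceMetadata
EOF
```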
diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml
index 76adde53972bf..fe93fb8860e5b 100644
--- a/.gitlab/source_test/macos.yml
+++ b/.gitlab/source_test/macos.yml
@@ -11,7 +11,6 @@ tests_macos:
variables:
PYTHON_RUNTIMES: "3"
script:
- - source /root/.bashrc
- !reference [.setup_macos_github_app]
- $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache .
- export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -)
@@ -43,7 +42,6 @@ lint_macos:
PYTHON_RUNTIMES: "3"
timeout: 6h
script:
- - source /root/.bashrc
- !reference [.setup_macos_github_app]
- $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache .
- export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -)
diff --git a/.gitlab/source_test/notify.yml b/.gitlab/source_test/notify.yml
index aee23f82cfae7..097a12f564aff 100644
--- a/.gitlab/source_test/notify.yml
+++ b/.gitlab/source_test/notify.yml
@@ -7,7 +7,6 @@ unit_tests_notify:
- !reference [.except_disable_unit_tests]
- when: always
script:
- - source /root/.bashrc
- python3 -m pip install -r tasks/libs/requirements-github.txt
- !reference [.setup_agent_github_app]
- inv notify.unit-tests --pipeline-id $CI_PIPELINE_ID --pipeline-url $CI_PIPELINE_URL --branch-name $CI_COMMIT_REF_NAME
diff --git a/.gitlab/source_test/slack.yml b/.gitlab/source_test/slack.yml
index 920d88bed12ff..5d357a98d2446 100644
--- a/.gitlab/source_test/slack.yml
+++ b/.gitlab/source_test/slack.yml
@@ -9,6 +9,5 @@ slack_teams_channels_check:
- !reference [.except_mergequeue]
- when: on_success
script:
- - source /root/.bashrc
- python3 -m pip install codeowners -c tasks/libs/requirements-notifications.txt
- inv -e notify.check-teams
diff --git a/.gitlab/source_test/technical_linters.yml b/.gitlab/source_test/technical_linters.yml
index 5bf8f5fe25518..c7759eda331bc 100644
--- a/.gitlab/source_test/technical_linters.yml
+++ b/.gitlab/source_test/technical_linters.yml
@@ -4,7 +4,6 @@ lint_python:
tags: ["arch:amd64"]
needs: []
script:
- - source /root/.bashrc
- inv -e linter.python
lint_update_go:
@@ -13,7 +12,6 @@ lint_update_go:
tags: ["arch:amd64"]
needs: []
script:
- - source /root/.bashrc
- inv -e linter.update-go
validate_modules:
@@ -22,6 +20,5 @@ validate_modules:
tags: ["arch:amd64"]
needs: []
script:
- - source /root/.bashrc
- inv -e modules.validate
- inv -e modules.validate-used-by-otel
diff --git a/.gitlab/source_test/tooling_unit_tests.yml b/.gitlab/source_test/tooling_unit_tests.yml
index 25419b084d120..e7a7ab4e1c133 100644
--- a/.gitlab/source_test/tooling_unit_tests.yml
+++ b/.gitlab/source_test/tooling_unit_tests.yml
@@ -8,7 +8,6 @@ invoke_unit_tests:
rules:
- !reference [.on_invoke_tasks_changes]
script:
- - source /root/.bashrc
- python3 -m pip install -r tasks/libs/requirements-github.txt
- inv -e invoke-unit-tests.run
@@ -20,7 +19,6 @@ kitchen_invoke_unit_tests:
rules:
- !reference [.on_kitchen_invoke_tasks_changes]
script:
- - source /root/.bashrc
- python3 -m pip install -r tasks/libs/requirements-github.txt
- pushd test/kitchen
- inv -e kitchen.invoke-unit-tests
diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml
index f6ff9f865360e..dfa1d0e2aaeb8 100644
--- a/.gitlab/source_test/windows.yml
+++ b/.gitlab/source_test/windows.yml
@@ -39,11 +39,11 @@
-e EXTRA_OPTS="${FAST_TESTS_FLAG}"
-e TEST_WASHER=true
-e GO_TEST_SKIP_FLAKE="${GO_TEST_SKIP_FLAKE}"
- -e API_KEY_ORG2_SSM_NAME="${API_KEY_ORG2_SSM_NAME}"
- -e CODECOV_TOKEN_SSM_NAME="${CODECOV_TOKEN_SSM_NAME}"
+ -e API_KEY_ORG2="${API_KEY_ORG2}"
+ -e CODECOV_TOKEN="${CODECOV_TOKEN}"
-e S3_PERMANENT_ARTIFACTS_URI="${S3_PERMANENT_ARTIFACTS_URI}"
-e COVERAGE_CACHE_FLAG="${COVERAGE_CACHE_FLAG}"
- -e GITLAB_TOKEN_SSM_NAME="${GITLAB_READ_API_TOKEN_SSM_NAME}"
+ -e GITLAB_TOKEN="${GITLAB_READ_API_TOKEN}"
486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
c:\mnt\tasks\winbuildscripts\unittests.bat
- If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml
index 11b204e7c7a9c..cf2a4b3591a84 100644
--- a/.gitlab/trigger_release/trigger_release.yml
+++ b/.gitlab/trigger_release/trigger_release.yml
@@ -18,9 +18,8 @@
script:
# agent-release-management creates pipelines for both Agent 6 and Agent 7
# when triggered with major version 7
- - source /root/.bashrc
- export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1
- - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN)
- 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main"
--variable ACTION
--variable AUTO_RELEASE
diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst
index 7a45c65b38675..786b6903a4c0d 100644
--- a/CHANGELOG-DCA.rst
+++ b/CHANGELOG-DCA.rst
@@ -2,6 +2,58 @@
Release Notes
=============
+.. _Release Notes_7.57.0:
+
+7.57.0
+======
+
+.. _Release Notes_7.57.0_Prelude:
+
+Prelude
+-------
+
+Released on: 2024-09-09
+Pinned to datadog-agent v7.57.0: `CHANGELOG `_.
+
+.. _Release Notes_7.57.0_New Features:
+
+New Features
+------------
+
+- The Cluster Agent now supports activating Continuous Profiling
+  using the Admission Controller.
+
+- ``LimitRange`` and ``StorageClass`` resources are now collected by the orchestrator check.
+
+
+.. _Release Notes_7.57.0_Enhancement Notes:
+
+Enhancement Notes
+-----------------
+
+- The auto-instrumentation webhook (beta) uses a new injector library.
+
+
+.. _Release Notes_7.57.0_Bug Fixes:
+
+Bug Fixes
+---------
+
+- Fixes a rare bug where some Kubernetes events would be emitted
+ without a timestamp and would be dropped upstream as a result.
+
+- Library package versions for auto-instrumentation are now set to the latest major
+  version of the library package instead of `latest`.
+
+ * java:v1
+ * dotnet:v2
+ * python:v2
+ * ruby:v2
+ * js:v5
+
+- Fix APIServer error logs generated when the external metrics endpoint is activated.
+
+
.. _Release Notes_7.56.2:
7.56.2
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 10ea12b5fa3ac..f0ccf802fe2cd 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,209 @@
Release Notes
=============
+.. _Release Notes_7.57.0:
+
+7.57.0
+======
+
+.. _Release Notes_7.57.0_Prelude:
+
+Prelude
+-------
+
+Released on: 2024-09-09
+
+- Please refer to the `7.57.0 tag on integrations-core `_ for the list of changes on the Core Checks
+
+
+.. _Release Notes_7.57.0_Upgrade Notes:
+
+Upgrade Notes
+-------------
+
+- Update cURL to 8.7.1.
+
+- Update OpenSSL from 3.0.14 to 3.3.1 (on Linux and macOS).
+
+
+.. _Release Notes_7.57.0_New Features:
+
+New Features
+------------
+
+- The `agent diagnose` command now includes a ``--json`` option to output the results in JSON format.
+
+- Add `integration` value for device metadata.
+
+- APM: To allow automatic instrumentation to work in Kubernetes
+  clusters that enforce a ``Restricted`` `Pod Security Standard `_,
+  which requires all containers to explicitly set a ``securityContext``,
+  an option has been added to configure a ``securityContext`` to be used for
+  all ``initContainers`` created by the auto instrumentation.
+  This can be done through the ``DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT``
+  environment variable, or the ``admission_controller.auto_instrumentation.init_security_context`` configuration -
+  in both cases a ``json`` string should be supplied (see the sketch after this list).
+
+- Adds a `kube_runtime_class` tag to metrics associated with Kubernetes
+ pods and their containers.
+
+- Expose the Agent's get host tags function to Python checks using the new `datadog_agent.get_host_tags` method.
+
+- Implement a static allowlist of Kubernetes events to send by default.
+ This feature is only enabled when ``filtering_enabled`` is set to
+ ``true`` in the ``kubernetes_apiserver`` integration configuration.
+
+- Adds a new launcher to handle incoming logs from integrations.
+
+- Add optional reverse DNS enrichment of private IP addresses to NDM NetFlow.
+
+- On Windows, the service inference feature is now enabled by default.
+
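As referenced in the auto-instrumentation note above, the init ``securityContext`` is supplied as a JSON string. An illustrative way to set it, using standard Kubernetes ``securityContext`` fields (the specific values are an example, not a recommendation from this release note):

```bash
# Illustrative: the env var takes a JSON securityContext object.
# Field choices below are examples of standard Kubernetes settings.
export DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT='{"runAsNonRoot":true,"runAsUser":10001,"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}'
```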
+
+.. _Release Notes_7.57.0_Enhancement Notes:
+
+Enhancement Notes
+-----------------
+
+- Turn on Orchestrator Explorer by default in the core agent.
+
+- Added a new ``source_host`` tag to TCP/UDP logs to help users understand where their logs came from.
+
+- Added support for handling UDP/TCP logs when running the containerized Agent.
+
+- APM: Allow custom HTTP client to be provided when instantiating the
+ trace-agent configuration. This feature is primarily intended for the
+ OpenTelemetry exporter.
+
+- APM: Add default UDS listeners for traces (trace-agent) and
+ dogstatsd (core-agent) on /var/run/datadog/apm.socket and
+ /var/run/datadog/dsd.socket, respectively.
+ These are used in the Single Step APM Instrumentation, improving
+ the onboarding experience and minimizing the agent configuration.
+
+- For the `Inferred Service Dependencies beta <https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=java>`_, add two new ``peer.hostname`` precursor attributes, ``out.host`` and ``dns.hostname``. This improves coverage of inferred services because some tracer integrations only place the peer hostname in one of those attributes.
+
+- APM stats for internal service overrides are now aggregated by the `_dd.base_service` tag only, enhancing visibility into specific base services.
+
+- Include spans with `span.kind=consumer` for aggregation of
+ stats on peer tags.
+
+- IP address quantization on all peer tags is done by the backend during ingestion. This change updates the Agent to apply the same IP address quantization, which reduces unnecessary aggregation on raw IP addresses and therefore improves the aggregation performance of stats on peer tags.
+
+- APM: Add a new setting to disable the HTTP receiver in the
+  trace-agent. The receiver should almost never be disabled;
+  the setting is only a convenience parameter for OpenTelemetry extensions.
+  Disabling the receiver is semantically equivalent to setting the
+  receiver_port to 0 and receiver_socket to "".
+
+- Agents are now built with Go ``1.22.6``.
+
+- [NDM] Adds the option to collect BGP neighbors metrics from Cisco SD-WAN.
+
+- [NDM] Add option to collect cloud application metrics from Cisco SD-WAN.
+
+- [Cisco SD-WAN] Allow enabling/disabling metrics collection.
+
+- Report the hostname of Kubernetes events based on the pod that
+  the event relates to.
+
+- Introduces a parser to extract tags from integration logs and attach them to outgoing logs.
+
+- Implement External Data environment variable injection in the Admission Controller.
+ Format for this new environment variable is `it-INIT_CONTAINER,cn-CONTAINER_NAME,pu-POD_UID`.
+ This new variable is needed for the New Origin Detection spec. It is used for Origin Detection
+ in case Local Data are unavailable, for example with Kata Containers and CGroups v2.
+
+- Upgraded JMXFetch to `0.49.3 `_ which adds support for jsr77 j2ee statistics
+ and custom ConnectionFactory. See `0.49.3 `_ for more details.
+
+- Windows Agent Installer gives a better error message when a gMSA
+ account is provided for ``ddagentuser`` that Windows does not recognize.
+
+- Uninstalling the Windows Agent MSI Installer removes specific
+ subdirectories of the install path to help prevent data loss when
+ ``PROJECTLOCATION`` is misconfigured to an existing directory.
+
+- Adds a default upper limit of 10000 to the number of network traffic
+ paths that are captured at a single time. The user can increase or
+ decrease this limit as needed.
+
+- Language detection can run on the core Agent without needing a gRPC server.
+
+- Add Hostname and ExtraTags to `CollectorECSTask`.
+
+- Collect SystemInfo for Pods and ECS Tasks.
+
+- Implement an API that allows Python checks to send logs for
+  eventual submission.
+
+- Users can use ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS`` to remove sensitive annotations and labels.
+ For example: ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS="sensitive-key-1 sensitive-key-2"``.
+ Keys should be separated by spaces. The agent removes any annotations and labels matching these keys.
+
+- Add the ability to tag interface metrics with user-defined tags.
+
+
+.. _Release Notes_7.57.0_Security Notes:
+
+Security Notes
+--------------
+
+- Fix CVE-2024-41110.
+
+
+.. _Release Notes_7.57.0_Bug Fixes:
+
+Bug Fixes
+---------
+
+- Results of `agent config` did not reflect the actual runtime config of the other services. Now, other Datadog Agent services (e.g. trace-agent) running as systemd services read the same environment variables from the text file `/etc/datadog-agent/environment` as the core Agent process.
+
+- [DBM] Bump go-sqllexer to 0.0.13 to fix a bug where the table name was incorrectly collected on PostgreSQL ``SELECT ONLY`` statements.
+
+- [Cisco SD-WAN] Do not collect unspecified IP addresses.
+
+- Fix `container.net.*` metrics accuracy on Linux. Currently `container.net.*` metrics are always emitted with high cardinality tags while the values may not represent actual container-level values but POD-level values (multiple containers in a pod) or host-level values (containers running in host network). With this bug fix, the `container.net.*` metrics aren't emitted for containers running in host network and a single timeseries is emitted by pods when running multiple containers. Finally, in non-Kubernetes environments, if multiple containers share the same network namespace, `container.net.*` metrics won't be emitted.
+
+- Fix duplicate logging in the Process Agent component's Enabled() method.
+
+- Fixed a bug in the kubelet check, when running in the core agent, that
+  caused `kubernetes.kubelet.container.log_filesystem.used_bytes`
+  to be reported for excluded/non-existing containers.
+  The metric was reported in this case without tags.
+  This bug does not exist in the Python integration version of the
+  kubelet check.
+
+- Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled.
+
+- Update the pro-bing library to include a fix for a Windows-specific issue with large ICMP packets.
+
+- [oracle] Fix wrong durations for cloud databases.
+
+- Stop chunking outputs in manual checks for the container, process, and process_discovery checks, so that the JSON unmarshaler can parse the output.
+
+- Remove the original pod annotation on Consul.
+
+- Fix pod status for pods using native sidecars.
+
+- Fix a regression where the Agent would fail to start on systems with SysVinit.
+
+- APM: Fixes an issue where the number of HTTP decoders was incorrectly set when GOMAXPROCS is set to milli-CPU values.
+
+
+.. _Release Notes_7.57.0_Other Notes:
+
+Other Notes
+-----------
+
+- Add metrics origins for vLLM integration.
+
+- Add deprecation warnings when running process checks on the Process Agent in Linux.
+ This change prepares for the deprecation of processes and container collection in the Process Agent, occurring in a future release.
+
+- Add metric origin for the AWS Neuron integration.
+
+
.. _Release Notes_7.56.2:
7.56.2
diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv
index 79e35081214b1..f8555a000f46d 100644
--- a/LICENSE-3rdparty.csv
+++ b/LICENSE-3rdparty.csv
@@ -275,7 +275,6 @@ core,github.com/ProtonMail/go-crypto/openpgp/internal/ecc,BSD-3-Clause,Copyright
core,github.com/ProtonMail/go-crypto/openpgp/internal/encoding,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved
core,github.com/ProtonMail/go-crypto/openpgp/packet,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved
core,github.com/ProtonMail/go-crypto/openpgp/s2k,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved
-core,github.com/PuerkitoBio/goquery,BSD-3-Clause,"Copyright (c) 2012-2021, Martin Angers & Contributors"
core,github.com/Showmax/go-fqdn,Apache-2.0,Copyright since 2015 Showmax s.r.o
core,github.com/StackExchange/wmi,MIT,Copyright (c) 2013 Stack Exchange
core,github.com/VividCortex/ewma,MIT,"Copyright (c) 2013 VividCortex | Copyright (c) 2013 VividCortex, Inc. All rights reserved"
@@ -290,10 +289,6 @@ core,github.com/alecthomas/participle/v2,MIT,Copyright (C) 2017 Alec Thomas | Co
core,github.com/alecthomas/participle/v2/lexer,MIT,Copyright (C) 2017 Alec Thomas | Copyright (C) 2017-2022 Alec Thomas
core,github.com/alecthomas/units,MIT,Copyright (C) 2014 Alec Thomas
core,github.com/anchore/go-struct-converter,Apache-2.0,"Copyright (c) 2022-2023 Anchore, Inc."
-core,github.com/andybalholm/cascadia,BSD-2-Clause,Copyright (c) 2011 Andy Balholm. All rights reserved
-core,github.com/antchfx/htmlquery,MIT,Copyright (c) 2016 Zheng Chun
-core,github.com/antchfx/xmlquery,MIT,Copyright (c) 2016 Zheng Chun
-core,github.com/antchfx/xpath,MIT,Copyright (c) 2016 Zheng Chun
core,github.com/antlr/antlr4/runtime/Go/antlr/v4,BSD-3-Clause,Copyright 2021 The ANTLR Project
core,github.com/apache/thrift/lib/go/thrift,Apache-2.0,"Copyright (C) 2006 - 2019, The Apache Software Foundation | Copyright (c) 2006- Facebook | Copyright (c) 2006-2008 Alexander Chemeris | Copyright (c) 2007 Thomas Porschberg | Copyright (c) 2008- Patrick Collison | Copyright 2007 by Nathan C. Myers ; some rights reserved | Copyright 2012 Twitter, Inc"
core,github.com/aquasecurity/go-gem-version,Apache-2.0,Copyright (c) 2020 Teppei Fukuda (knqyf263)
@@ -1078,9 +1073,6 @@ core,github.com/goccy/go-json/internal/encoder/vm_color_indent,MIT,Copyright (c)
core,github.com/goccy/go-json/internal/encoder/vm_indent,MIT,Copyright (c) 2020 Masaaki Goshima
core,github.com/goccy/go-json/internal/errors,MIT,Copyright (c) 2020 Masaaki Goshima
core,github.com/goccy/go-json/internal/runtime,MIT,Copyright (c) 2020 Masaaki Goshima
-core,github.com/gocolly/colly/v2,Apache-2.0,Copyright 2018 Adam Tauber
-core,github.com/gocolly/colly/v2/debug,Apache-2.0,Copyright 2018 Adam Tauber
-core,github.com/gocolly/colly/v2/storage,Apache-2.0,Copyright 2018 Adam Tauber
core,github.com/gocomply/scap/pkg/scap/constants,CC0-1.0,CC0 1.0 Universal
core,github.com/gocomply/scap/pkg/scap/models/cdf,CC0-1.0,CC0 1.0 Universal
core,github.com/gocomply/scap/pkg/scap/models/cpe,CC0-1.0,CC0 1.0 Universal
@@ -1297,7 +1289,6 @@ core,github.com/hashicorp/go-sockaddr,MPL-2.0,"Copyright © 2014-2018 HashiCorp,
core,github.com/hashicorp/go-version,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
core,github.com/hashicorp/golang-lru/simplelru,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
core,github.com/hashicorp/golang-lru/v2,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
-core,github.com/hashicorp/golang-lru/v2/expirable,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
core,github.com/hashicorp/golang-lru/v2/internal,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
core,github.com/hashicorp/golang-lru/v2/simplelru,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
core,github.com/hashicorp/hcl,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc"
@@ -1376,6 +1367,7 @@ core,github.com/jaegertracing/jaeger/thrift-gen/jaeger,Apache-2.0,Copyright 2015
core,github.com/jaegertracing/jaeger/thrift-gen/sampling,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors
core,github.com/jaegertracing/jaeger/thrift-gen/zipkincore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors
core,github.com/jbenet/go-context/io,MIT,Copyright (c) 2014 Juan Batiz-Benet
+core,github.com/jellydator/ttlcache/v3,MIT,Copyright (c) 2022 Jellydator
core,github.com/jinzhu/inflection,MIT,Copyright (c) 2015 - Jinzhu
core,github.com/jlaffaye/ftp,ISC,"Copyright (c) 2011-2013, Julien Laffaye "
core,github.com/jmespath/go-jmespath,Apache-2.0,Copyright 2015 James Saryerwinnie
@@ -1391,7 +1383,6 @@ core,github.com/justincormack/go-memfd/msyscall,MIT,Copyright (c) 2017 Justin Co
core,github.com/kardianos/osext,BSD-3-Clause,Copyright (c) 2012 The Go Authors. All rights reserved
core,github.com/karrick/godirwalk,BSD-2-Clause,"Copyright (c) 2017, Karrick McDermott"
core,github.com/kballard/go-shellquote,MIT,Copyright (C) 2014 Kevin Ballard
-core,github.com/kennygrant/sanitize,BSD-3-Clause,Copyright (c) 2017 Mechanism Design. All rights reserved
core,github.com/kevinburke/ssh_config,MIT,"Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton | Copyright (c) 2017 Kevin Burke"
core,github.com/kjk/lzma,BSD-3-Clause,"Copyright (c) 2010, Andrei Vieru. All rights reserved"
core,github.com/klauspost/compress,BSD-3-Clause,Copyright (c) 2011 The Snappy-Go Authors. All rights reserved | Copyright (c) 2012 The Go Authors. All rights reserved | Copyright (c) 2015 Klaus Post | Copyright (c) 2019 Klaus Post. All rights reserved | Copyright 2016 The filepathx Authors | Copyright 2016-2017 The New York Times Company
@@ -1993,7 +1984,6 @@ core,github.com/rs/zerolog/log,MIT,Copyright (c) 2017 Olivier Poitrey
core,github.com/ryanuber/go-glob,MIT,Copyright (c) 2014 Ryan Uber
core,github.com/sagikazarmark/locafero,MIT,Copyright (c) 2023 Márk Sági-Kazár
core,github.com/sagikazarmark/slog-shim,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved
-core,github.com/saintfish/chardet,MIT,Copyright (c) 2012 chardet Authors | Sheng Yu (yusheng dot sjtu at gmail dot com)
core,github.com/samber/lo,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber)
core,github.com/samber/lo/internal/constraints,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber)
core,github.com/samber/lo/internal/rand,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber)
@@ -2132,7 +2122,6 @@ core,github.com/syndtr/goleveldb/leveldb/table,BSD-2-Clause,Copyright 2012 Surya
core,github.com/syndtr/goleveldb/leveldb/util,BSD-2-Clause,Copyright 2012 Suryandaru Triandana
core,github.com/tchap/go-patricia/v2/patricia,MIT,Copyright (c) 2014 The AUTHORS | Ondřej Kupka | This is the complete list of go-patricia copyright holders:
core,github.com/tedsuo/rata,MIT,Copyright (c) 2014 Ted Young
-core,github.com/temoto/robotstxt,MIT,Copyright (c) 2010 Sergey Shepelev
core,github.com/tetratelabs/wazero,Apache-2.0,Copyright 2020-2023 wazero authors
core,github.com/tetratelabs/wazero/api,Apache-2.0,Copyright 2020-2023 wazero authors
core,github.com/tetratelabs/wazero/experimental,Apache-2.0,Copyright 2020-2023 wazero authors
@@ -2363,12 +2352,9 @@ core,go.opentelemetry.io/collector/exporter/exporterbatcher,Apache-2.0,Copyright
core,go.opentelemetry.io/collector/exporter/exporterhelper,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/exporterqueue,Apache-2.0,Copyright The OpenTelemetry Authors
-core,go.opentelemetry.io/collector/exporter/internal/common,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/internal/experr,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/internal/otlptext,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/internal/queue,Apache-2.0,Copyright The OpenTelemetry Authors
-core,go.opentelemetry.io/collector/exporter/loggingexporter,Apache-2.0,Copyright The OpenTelemetry Authors
-core,go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/nopexporter,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/nopexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors
core,go.opentelemetry.io/collector/exporter/otlpexporter,Apache-2.0,Copyright The OpenTelemetry Authors
@@ -2754,13 +2740,6 @@ core,google.golang.org/api/transport,BSD-3-Clause,Copyright (c) 2011 Google Inc.
core,google.golang.org/api/transport/grpc,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved.
core,google.golang.org/api/transport/http,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved.
core,google.golang.org/api/transport/http/internal/propagation,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal/base,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal/datastore,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal/log,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal/remote_api,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/internal/urlfetch,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
-core,google.golang.org/appengine/urlfetch,Apache-2.0,Copyright 2011 Google Inc. All rights reserved.
core,google.golang.org/genproto/googleapis/api,Apache-2.0,Copyright 2015 Google LLC
core,google.golang.org/genproto/googleapis/api/annotations,Apache-2.0,Copyright 2015 Google LLC
core,google.golang.org/genproto/googleapis/api/expr/v1alpha1,Apache-2.0,Copyright 2015 Google LLC
diff --git a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example
index 6a1a287f9d267..6f3e29eed0bfd 100644
--- a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example
+++ b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example
@@ -5,6 +5,12 @@ init_config:
#
# min_collection_interval: 60
+ ## @param timeout - integer - optional - default: 1000
+ ## Specifies how much time in milliseconds the traceroute should
+ ## wait for a response from each hop before timing out.
+ #
+ # timeout: 1000
+
# Network Path integration is used to monitor individual endpoints.
# Supported platforms are Linux and Windows. macOS is not supported yet.
instances:
@@ -30,6 +36,12 @@ instances:
#
# max_ttl:
+ ## @param timeout - integer - optional - default: 1000
+ ## Specifies how much time in milliseconds the traceroute should
+ ## wait for a response from each hop before timing out.
+ #
+ # timeout: 1000
+
## @param min_collection_interval - number - optional - default: 60
## Specifies how frequently we should probe the endpoint.
## Min collection interval is defined in seconds.
diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go
index 6ae802311346f..b827558dbdb73 100644
--- a/cmd/agent/subcommands/jmx/command.go
+++ b/cmd/agent/subcommands/jmx/command.go
@@ -158,8 +158,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command {
fx.Provide(tagger.NewTaggerParamsForCoreAgent),
taggerimpl.Module(),
autodiscoveryimpl.Module(),
- agent.Bundle(),
- fx.Supply(jmxloggerimpl.NewCliParams(cliParams.logFile)),
+ agent.Bundle(jmxloggerimpl.NewCliParams(cliParams.logFile)),
)
}
diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go
index b26ba80c58a9a..6ff4f17804df5 100644
--- a/cmd/agent/subcommands/run/command.go
+++ b/cmd/agent/subcommands/run/command.go
@@ -443,8 +443,7 @@ func getSharedFxOption() fx.Option {
collectorimpl.Module(),
process.Bundle(),
guiimpl.Module(),
- agent.Bundle(),
- fx.Supply(jmxloggerimpl.NewDefaultParams()),
+ agent.Bundle(jmxloggerimpl.NewDefaultParams()),
fx.Provide(func(config config.Component) healthprobe.Options {
return healthprobe.Options{
Port: config.GetInt("health_port"),
diff --git a/cmd/installer/subcommands/daemon/run_windows.go b/cmd/installer/subcommands/daemon/run_windows.go
index 57d13a5b5ef53..4559addad1fe1 100644
--- a/cmd/installer/subcommands/daemon/run_windows.go
+++ b/cmd/installer/subcommands/daemon/run_windows.go
@@ -9,6 +9,8 @@ package daemon
import (
"context"
+ "syscall"
+
"github.com/DataDog/datadog-agent/cmd/installer/command"
"github.com/DataDog/datadog-agent/comp/core/pid"
"github.com/DataDog/datadog-agent/comp/updater/localapi"
@@ -16,7 +18,6 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
"github.com/judwhite/go-svc"
"go.uber.org/fx"
- "syscall"
)
type windowsService struct {
@@ -26,7 +27,7 @@ type windowsService struct {
func getFxOptions(global *command.GlobalParams) []fx.Option {
return []fx.Option{
getCommonFxOption(global),
- fxutil.FxAgentBase(),
+ fxutil.FxAgentBase(true),
// Force the instantiation of some components
fx.Invoke(func(_ pid.Component) {}),
fx.Invoke(func(_ localapi.Component) {}),
diff --git a/cmd/installer/subcommands/daemon/status.tmpl b/cmd/installer/subcommands/daemon/status.tmpl
index 714ed431f3214..045b819d53764 100644
--- a/cmd/installer/subcommands/daemon/status.tmpl
+++ b/cmd/installer/subcommands/daemon/status.tmpl
@@ -7,16 +7,38 @@ Datadog Installer v{{ htmlSafe .Version }}
{{ greenText "●" }} stable: v{{ htmlSafe $package.Stable }}
{{- else }}
{{ redText "●" }} stable: none
- {{- end }}{{ if $package.Experiment }}
+ {{- end }}
+ {{- if $package.Experiment }}
{{ yellowText "●" }} experiment: v{{ htmlSafe $package.Experiment }}
{{- else }}
● experiment: none
{{- end }}
- {{- if eq $name "datadog-apm-inject" }}{{ template "datadog-apm-inject" $.ApmInjectionStatus }}{{ end }}
+
+ {{- if eq $name "datadog-apm-inject" }}
+ {{ template "datadog-apm-inject" $.ApmInjectionStatus }}
+ {{- end }}
+
+ {{- range $remoteConfig := $.RemoteConfigState }}
+ {{- if eq $remoteConfig.Package $name }}
+ Remote configuration client state:
+ StableVersion: {{ $remoteConfig.StableVersion }}
+ ExperimentVersion: {{ $remoteConfig.ExperimentVersion }}
+ StableConfigVersion: {{ $remoteConfig.StableConfigVersion }}
+ ExperimentConfigVersion: {{ $remoteConfig.ExperimentConfigVersion }}
+ RemoteConfigVersion: {{ $remoteConfig.RemoteConfigVersion }}
+ Task:
+ {{- if $remoteConfig.Task }}
+ Id: {{ $remoteConfig.Task.Id }}
+ State: {{ $remoteConfig.Task.State }}
+ {{- if $remoteConfig.Task.Error }}
+ Error: {{ $remoteConfig.Task.Error }}
+ {{- end }}
+ {{- else }}
+ No task available
+ {{- end }}
+ {{- end }}
+ {{- end }}
{{ end -}}
-{{- if .RemoteConfigState }}
-{{ template "remote-config-state" $.RemoteConfigState }}
-{{- end -}}
{{- define "datadog-apm-inject" }}
Instrumentation status:
@@ -32,26 +54,4 @@ Datadog Installer v{{ htmlSafe .Version }}
{{- else -}}
{{ redText "●" }} Docker: Not instrumented
{{- end }}
-{{- end -}}
-
-{{- define "remote-config-state" }}
- Remote configuration client state:
- {{ range . }}
- {{ boldText .Package }}
- StableVersion: {{ .StableVersion }}
- ExperimentVersion: {{ .ExperimentVersion }}
- StableConfigVersion: {{ .StableConfigVersion }}
- ExperimentConfigVersion: {{ .ExperimentConfigVersion }}
- RemoteConfigVersion: {{ .RemoteConfigVersion }}
- Task:
- {{- if .Task }}
- Id: {{ .Task.Id }}
- State: {{ .Task.State }}
- {{- if .Task.Error }}
- Error: {{ .Task.Error }}
- {{- end }}
- {{- else }}
- No task available
- {{- end }}
- {{ end }}
{{- end }}
diff --git a/cmd/internal/runcmd/runcmd.go b/cmd/internal/runcmd/runcmd.go
index 6f871fccec364..6b197801df093 100644
--- a/cmd/internal/runcmd/runcmd.go
+++ b/cmd/internal/runcmd/runcmd.go
@@ -9,7 +9,6 @@ package runcmd
import (
"fmt"
"io"
- "os"
"github.com/spf13/cobra"
"go.uber.org/dig"
@@ -36,10 +35,9 @@ func Run(cmd *cobra.Command) int {
// these are simply printed with an "Error: " prefix, but some kinds of errors
// are first simplified to reduce user confusion.
func displayError(err error, w io.Writer) {
- _, traceFxSet := os.LookupEnv("TRACE_FX")
// RootCause returns the error it was given if it cannot find a "root cause",
// and otherwise returns the root cause, which is more useful to the user.
- if rc := dig.RootCause(err); rc != err && !traceFxSet {
+ if rc := dig.RootCause(err); rc != err {
fmt.Fprintln(w, "Error:", rc.Error())
return
}
diff --git a/cmd/internal/runcmd/runcmd_test.go b/cmd/internal/runcmd/runcmd_test.go
index e9aac05348d93..7bd572cd260a4 100644
--- a/cmd/internal/runcmd/runcmd_test.go
+++ b/cmd/internal/runcmd/runcmd_test.go
@@ -8,8 +8,6 @@ package runcmd
import (
"bytes"
"errors"
- "os"
- "regexp"
"testing"
"github.com/spf13/cobra"
@@ -60,16 +58,6 @@ func TestDisplayError_normalError(t *testing.T) {
// fx errors are abbreviated to just the root cause by default
func TestDisplayError_fxError(t *testing.T) {
var buf bytes.Buffer
- t.Setenv("TRACE_FX", "") // get testing to reset this value for us
- os.Unsetenv("TRACE_FX") // but actually _unset_ the value
displayError(makeFxError(t), &buf)
require.Equal(t, "Error: uhoh\n", buf.String())
}
-
-// entire error is included with TRACE_FX set
-func TestDisplayError_fxError_TRACE_FX(t *testing.T) {
- var buf bytes.Buffer
- t.Setenv("TRACE_FX", "1")
- displayError(makeFxError(t), &buf)
- require.Regexp(t, regexp.MustCompile("Error: could not build arguments for function .* uhoh"), buf.String())
-}
diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go
index fe6f66ef3d88e..d9d9c208b8d59 100644
--- a/cmd/process-agent/command/main_common.go
+++ b/cmd/process-agent/command/main_common.go
@@ -174,14 +174,17 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error {
// Provide the corresponding tagger Params to configure the tagger
fx.Provide(func(c config.Component) tagger.Params {
- if c.GetBool("process_config.remote_tagger") {
+ if c.GetBool("process_config.remote_tagger") ||
+ // If the agent is running in ECS or ECS Fargate and the ECS task collection is enabled, use the remote tagger
+ // as remote tagger can return more tags than the local tagger.
+ ((env.IsECS() || env.IsECSFargate()) && c.GetBool("ecs_task_collection_enabled")) {
return tagger.NewNodeRemoteTaggerParams()
}
return tagger.NewTaggerParams()
}),
// Provides specific features to our own fx wrapper (logging, lifecycle, shutdowner)
- fxutil.FxAgentBase(),
+ fxutil.FxAgentBase(true),
// Set the pid file path
fx.Supply(pidimpl.NewParams(globalParams.PidFilePath)),
diff --git a/cmd/process-agent/subcommands/check/check.go b/cmd/process-agent/subcommands/check/check.go
index d91447f346d19..be338e8df2252 100644
--- a/cmd/process-agent/subcommands/check/check.go
+++ b/cmd/process-agent/subcommands/check/check.go
@@ -182,10 +182,12 @@ func RunCheckCmd(deps Dependencies) error {
names = append(names, ch.Name())
_, processModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.ProcessModule]
+ _, networkTracerModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.NetworkTracerModule]
cfg := &checks.SysProbeConfig{
- MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage,
- SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress,
- ProcessModuleEnabled: processModuleEnabled,
+ MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage,
+ SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress,
+ ProcessModuleEnabled: processModuleEnabled,
+ NetworkTracerModuleEnabled: networkTracerModuleEnabled,
}
if !matchingCheck(deps.CliParams.checkName, ch) {
diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go
index 667f9778f2467..7725d89c81e44 100644
--- a/cmd/security-agent/subcommands/runtime/command.go
+++ b/cmd/security-agent/subcommands/runtime/command.go
@@ -684,8 +684,7 @@ func StartRuntimeSecurity(log log.Component, config config.Component, hostname s
// start/stop order is important, agent need to be stopped first and started after all the others
// components
agent, err := secagent.NewRuntimeSecurityAgent(statsdClient, hostname, secagent.RSAOptions{
- LogProfiledWorkloads: config.GetBool("runtime_security_config.log_profiled_workloads"),
- IgnoreDDAgentContainers: config.GetBool("runtime_security_config.telemetry.ignore_dd_agent_containers"),
+ LogProfiledWorkloads: config.GetBool("runtime_security_config.log_profiled_workloads"),
}, wmeta)
if err != nil {
return nil, fmt.Errorf("unable to create a runtime security agent instance: %w", err)
diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt
index 6059520efe6e5..c31cb5dcebd87 100644
--- a/cmd/serverless/dependencies_linux_amd64.txt
+++ b/cmd/serverless/dependencies_linux_amd64.txt
@@ -271,6 +271,7 @@ github.com/DataDog/datadog-agent/pkg/util/fargate
github.com/DataDog/datadog-agent/pkg/util/filesystem
github.com/DataDog/datadog-agent/pkg/util/flavor
github.com/DataDog/datadog-agent/pkg/util/fxutil
+github.com/DataDog/datadog-agent/pkg/util/fxutil/logging
github.com/DataDog/datadog-agent/pkg/util/hostname
github.com/DataDog/datadog-agent/pkg/util/hostname/validate
github.com/DataDog/datadog-agent/pkg/util/http
@@ -579,16 +580,16 @@ go.opentelemetry.io/collector/connector
go.opentelemetry.io/collector/consumer
go.opentelemetry.io/collector/consumer/consumererror
go.opentelemetry.io/collector/exporter
+go.opentelemetry.io/collector/exporter/debugexporter
+go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata
+go.opentelemetry.io/collector/exporter/debugexporter/internal/normal
go.opentelemetry.io/collector/exporter/exporterbatcher
go.opentelemetry.io/collector/exporter/exporterhelper
go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata
go.opentelemetry.io/collector/exporter/exporterqueue
-go.opentelemetry.io/collector/exporter/internal/common
go.opentelemetry.io/collector/exporter/internal/experr
go.opentelemetry.io/collector/exporter/internal/otlptext
go.opentelemetry.io/collector/exporter/internal/queue
-go.opentelemetry.io/collector/exporter/loggingexporter
-go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata
go.opentelemetry.io/collector/exporter/otlpexporter
go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata
go.opentelemetry.io/collector/extension
diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt
index f5373b770a685..afc9f4e4fa881 100644
--- a/cmd/serverless/dependencies_linux_arm64.txt
+++ b/cmd/serverless/dependencies_linux_arm64.txt
@@ -271,6 +271,7 @@ github.com/DataDog/datadog-agent/pkg/util/fargate
github.com/DataDog/datadog-agent/pkg/util/filesystem
github.com/DataDog/datadog-agent/pkg/util/flavor
github.com/DataDog/datadog-agent/pkg/util/fxutil
+github.com/DataDog/datadog-agent/pkg/util/fxutil/logging
github.com/DataDog/datadog-agent/pkg/util/hostname
github.com/DataDog/datadog-agent/pkg/util/hostname/validate
github.com/DataDog/datadog-agent/pkg/util/http
@@ -578,16 +579,16 @@ go.opentelemetry.io/collector/connector
go.opentelemetry.io/collector/consumer
go.opentelemetry.io/collector/consumer/consumererror
go.opentelemetry.io/collector/exporter
+go.opentelemetry.io/collector/exporter/debugexporter
+go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata
+go.opentelemetry.io/collector/exporter/debugexporter/internal/normal
go.opentelemetry.io/collector/exporter/exporterbatcher
go.opentelemetry.io/collector/exporter/exporterhelper
go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata
go.opentelemetry.io/collector/exporter/exporterqueue
-go.opentelemetry.io/collector/exporter/internal/common
go.opentelemetry.io/collector/exporter/internal/experr
go.opentelemetry.io/collector/exporter/internal/otlptext
go.opentelemetry.io/collector/exporter/internal/queue
-go.opentelemetry.io/collector/exporter/loggingexporter
-go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata
go.opentelemetry.io/collector/exporter/otlpexporter
go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata
go.opentelemetry.io/collector/extension
diff --git a/cmd/system-probe/modules/dynamic_instrumentation.go b/cmd/system-probe/modules/dynamic_instrumentation.go
index 7ff8d7d48ba6d..bd4272b8c8295 100644
--- a/cmd/system-probe/modules/dynamic_instrumentation.go
+++ b/cmd/system-probe/modules/dynamic_instrumentation.go
@@ -14,23 +14,26 @@ import (
"github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
"github.com/DataDog/datadog-agent/cmd/system-probe/config"
sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types"
- "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation"
+ dimod "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/module"
"github.com/DataDog/datadog-agent/pkg/ebpf"
)
-// DynamicInstrumentation is the dynamic instrumentation module factory
+// DynamicInstrumentation is a system-probe module that allows adding instrumentation to
+// running Go services without restarting them.
var DynamicInstrumentation = module.Factory{
Name: config.DynamicInstrumentationModule,
ConfigNamespaces: []string{},
Fn: func(agentConfiguration *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) {
- config, err := dynamicinstrumentation.NewConfig(agentConfiguration)
+ config, err := dimod.NewConfig(agentConfiguration)
if err != nil {
return nil, fmt.Errorf("invalid dynamic instrumentation module configuration: %w", err)
}
-
- m, err := dynamicinstrumentation.NewModule(config)
- if errors.Is(err, ebpf.ErrNotImplemented) {
- return nil, module.ErrNotEnabled
+ m, err := dimod.NewModule(config)
+ if err != nil {
+ if errors.Is(err, ebpf.ErrNotImplemented) {
+ return nil, module.ErrNotEnabled
+ }
+ return nil, err
}
return m, nil
diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go
index cec624ea3f746..d94555cddd939 100644
--- a/cmd/system-probe/modules/eventmonitor.go
+++ b/cmd/system-probe/modules/eventmonitor.go
@@ -48,7 +48,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen
}
if secconfig.RuntimeSecurity.IsRuntimeEnabled() {
- cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, secmoduleOpts)
+ cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, deps.WMeta, secmoduleOpts)
if err != nil {
return nil, err
}
diff --git a/cmd/system-probe/modules/network_tracer.go b/cmd/system-probe/modules/network_tracer.go
index 4852575b36a75..44f2af55d9c78 100644
--- a/cmd/system-probe/modules/network_tracer.go
+++ b/cmd/system-probe/modules/network_tracer.go
@@ -12,6 +12,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io"
"net/http"
"os"
"runtime"
@@ -108,6 +109,16 @@ func (nt *networkTracer) Register(httpMux *module.Router) error {
logRequests(id, count, len(cs.Conns), start)
}))
+ httpMux.HandleFunc("/network_id", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) {
+ id, err := nt.tracer.GetNetworkID(req.Context())
+ if err != nil {
+ log.Errorf("unable to retrieve network_id: %s", err)
+ w.WriteHeader(500)
+ return
+ }
+ io.WriteString(w, id)
+ }))
+
httpMux.HandleFunc("/register", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) {
id := getClientID(req)
err := nt.tracer.RegisterClient(id)
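
As a quick illustration of how the new endpoint might be exercised, here is a hedged Go client sketch. The unix socket path and the "network_tracer" URL prefix are assumptions for illustration only; the source above only confirms that the handler is registered at "/network_id" on the module's router.

    package main

    import (
    	"context"
    	"fmt"
    	"io"
    	"net"
    	"net/http"
    )

    func main() {
    	// The system-probe API is typically served over a unix socket; this
    	// path is a hypothetical example, not the packaged default.
    	client := &http.Client{
    		Transport: &http.Transport{
    			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
    				return (&net.Dialer{}).DialContext(ctx, "unix", "/tmp/sysprobe.sock")
    			},
    		},
    	}

    	resp, err := client.Get("http://unix/network_tracer/network_id")
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()

    	body, _ := io.ReadAll(resp.Body)
    	// On success the handler writes the network ID as a plain string.
    	fmt.Println(resp.StatusCode, string(body))
    }
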
diff --git a/cmd/system-probe/modules/traceroute.go b/cmd/system-probe/modules/traceroute.go
index 8314fe4dce396..6e0667e120784 100644
--- a/cmd/system-probe/modules/traceroute.go
+++ b/cmd/system-probe/modules/traceroute.go
@@ -85,7 +85,7 @@ func (t *traceroute) Register(httpMux *module.Router) error {
}
runCount := runCounter.Inc()
- logTracerouteRequests(cfg.DestHostname, id, runCount, start)
+ logTracerouteRequests(cfg, id, runCount, start)
})
return nil
@@ -97,9 +97,9 @@ func (t *traceroute) RegisterGRPC(_ grpc.ServiceRegistrar) error {
func (t *traceroute) Close() {}
-func logTracerouteRequests(host string, client string, runCount uint64, start time.Time) {
- args := []interface{}{host, client, runCount, time.Since(start)}
- msg := "Got request on /traceroute/%s?client_id=%s (count: %d): retrieved traceroute in %s"
+func logTracerouteRequests(cfg tracerouteutil.Config, client string, runCount uint64, start time.Time) {
+ args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, cfg.Protocol, runCount, time.Since(start)}
+ msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d&protocol=%s (count: %d): retrieved traceroute in %s"
switch {
case runCount <= 5, runCount%20 == 0:
log.Infof(msg, args...)
@@ -119,7 +119,7 @@ func parseParams(req *http.Request) (tracerouteutil.Config, error) {
if err != nil {
return tracerouteutil.Config{}, fmt.Errorf("invalid max_ttl: %s", err)
}
- timeout, err := parseUint(req, "timeout", 32)
+ timeout, err := parseUint(req, "timeout", 64)
if err != nil {
return tracerouteutil.Config{}, fmt.Errorf("invalid timeout: %s", err)
}
@@ -129,7 +129,7 @@ func parseParams(req *http.Request) (tracerouteutil.Config, error) {
DestHostname: host,
DestPort: uint16(port),
MaxTTL: uint8(maxTTL),
- TimeoutMs: uint(timeout),
+ Timeout: time.Duration(timeout),
Protocol: payload.Protocol(protocol),
}, nil
}
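
For context on the timeout handling above, a minimal, self-contained sketch of parsing a millisecond timeout query parameter into a time.Duration. The parseTimeoutMs helper and the explicit time.Millisecond conversion are illustrative assumptions, not the exact code in this patch, which casts the raw value directly.

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    	"strconv"
    	"time"
    )

    // parseTimeoutMs is a hypothetical helper: it reads a "timeout" query
    // parameter expressed in milliseconds and converts it to a time.Duration,
    // falling back to a default when the parameter is absent.
    func parseTimeoutMs(req *http.Request, def time.Duration) (time.Duration, error) {
    	raw := req.URL.Query().Get("timeout")
    	if raw == "" {
    		return def, nil
    	}
    	ms, err := strconv.ParseUint(raw, 10, 64)
    	if err != nil {
    		return 0, fmt.Errorf("invalid timeout: %w", err)
    	}
    	return time.Duration(ms) * time.Millisecond, nil
    }

    func main() {
    	req := httptest.NewRequest(http.MethodGet, "/traceroute/1.2.3.4?timeout=1000", nil)
    	timeout, err := parseTimeoutMs(req, time.Second)
    	fmt.Println(timeout, err) // 1s <nil>
    }
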
diff --git a/cmd/system-probe/modules/traceroute_test.go b/cmd/system-probe/modules/traceroute_test.go
index fcfc929234ddf..31451a241c70b 100644
--- a/cmd/system-probe/modules/traceroute_test.go
+++ b/cmd/system-probe/modules/traceroute_test.go
@@ -46,7 +46,7 @@ func TestParseParams(t *testing.T) {
DestHostname: "1.2.3.4",
DestPort: 42,
MaxTTL: 35,
- TimeoutMs: 1000,
+ Timeout: 1000,
},
},
}
diff --git a/comp/agent/bundle.go b/comp/agent/bundle.go
index bfb93359e0554..5e550c0e1ee08 100644
--- a/comp/agent/bundle.go
+++ b/comp/agent/bundle.go
@@ -17,10 +17,10 @@ import (
// team: agent-shared-components
// Bundle defines the fx options for this bundle.
-func Bundle() fxutil.BundleOptions {
+func Bundle(params jmxloggerimpl.Params) fxutil.BundleOptions {
return fxutil.Bundle(
autoexitimpl.Module(),
- jmxloggerimpl.Module(),
+ jmxloggerimpl.Module(params),
expvarserverimpl.Module(),
cloudfoundrycontainerimpl.Module(),
)
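
The change above moves the fx.Supply of the JMX logger params from each caller into the bundle and module themselves. A minimal sketch of that pattern, with toy names (Params, newLogger) standing in for the real components:

    package main

    import (
    	"fmt"

    	"go.uber.org/fx"
    )

    // Params is a toy stand-in for jmxloggerimpl.Params.
    type Params struct{ LogFile string }

    type logger struct{ file string }

    func newLogger(p Params) *logger { return &logger{file: p.LogFile} }

    // Module bundles the constructor together with its params, so callers
    // pass the params once instead of adding a separate fx.Supply option.
    func Module(params Params) fx.Option {
    	return fx.Options(
    		fx.Supply(params),
    		fx.Provide(newLogger),
    	)
    }

    func main() {
    	app := fx.New(
    		Module(Params{LogFile: "/tmp/jmx.log"}),
    		fx.Invoke(func(l *logger) { fmt.Println("logging to", l.file) }),
    	)
    	_ = app.Err() // nil if the dependency graph resolves
    }
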
diff --git a/comp/agent/bundle_test.go b/comp/agent/bundle_test.go
index 2ce804e6894d9..7f60820804110 100644
--- a/comp/agent/bundle_test.go
+++ b/comp/agent/bundle_test.go
@@ -24,7 +24,7 @@ import (
func TestBundleDependencies(t *testing.T) {
fxutil.TestBundle(t,
- Bundle(),
+ Bundle(jmxloggerimpl.NewDefaultParams()),
core.MockBundle(),
compressionimpl.MockModule(),
defaultforwarder.MockModule(),
@@ -32,7 +32,6 @@ func TestBundleDependencies(t *testing.T) {
eventplatformimpl.MockModule(),
demultiplexerimpl.Module(),
fx.Supply(demultiplexerimpl.NewDefaultParams()),
- fx.Supply(jmxloggerimpl.NewDefaultParams()),
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
)
}
diff --git a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go
index 965fae44628fd..1fe9aa55b1dfa 100644
--- a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go
+++ b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go
@@ -20,9 +20,10 @@ import (
)
// Module defines the fx options for this component.
-func Module() fxutil.Module {
+func Module(params Params) fxutil.Module {
return fxutil.Component(
fx.Provide(newJMXLogger),
+ fx.Supply(params),
)
}
diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go
index e265871190fef..5b262c93d0223 100644
--- a/comp/collector/collector/collectorimpl/collector.go
+++ b/comp/collector/collector/collectorimpl/collector.go
@@ -8,7 +8,9 @@ package collectorimpl
import (
"context"
+ "encoding/json"
"fmt"
+ "os"
"sync"
"time"
@@ -20,6 +22,7 @@ import (
"github.com/DataDog/datadog-agent/comp/collector/collector"
"github.com/DataDog/datadog-agent/comp/collector/collector/collectorimpl/internal/middleware"
"github.com/DataDog/datadog-agent/comp/core/config"
+ flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
"github.com/DataDog/datadog-agent/comp/core/status"
metadata "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl"
@@ -30,6 +33,8 @@ import (
"github.com/DataDog/datadog-agent/pkg/collector/runner"
"github.com/DataDog/datadog-agent/pkg/collector/runner/expvars"
"github.com/DataDog/datadog-agent/pkg/collector/scheduler"
+ "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host"
+ "github.com/DataDog/datadog-agent/pkg/sbom/scanner"
"github.com/DataDog/datadog-agent/pkg/serializer"
collectorStatus "github.com/DataDog/datadog-agent/pkg/status/collector"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
@@ -81,6 +86,7 @@ type provides struct {
StatusProvider status.InformationProvider
MetadataProvider metadata.Provider
APIGetPyStatus api.AgentEndpointProvider
+ FlareProvider flaretypes.Provider
}
// Module defines the fx options for this component.
@@ -106,6 +112,7 @@ func newProvides(deps dependencies) provides {
StatusProvider: status.NewInformationProvider(collectorStatus.Provider{}),
MetadataProvider: agentCheckMetadata,
APIGetPyStatus: api.NewAgentEndpointProvider(getPythonStatus, "/py/status", "GET"),
+ FlareProvider: flaretypes.NewProvider(c.fillFlare),
}
}
@@ -132,6 +139,35 @@ func newCollector(deps dependencies) *collectorImpl {
return c
}
+// fillFlare scans the host for its SBOM and adds the CycloneDX report to each flare
+func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error {
+ scanner := scanner.GetGlobalScanner()
+ if scanner == nil {
+ return nil
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+
+ scanRequest := host.NewScanRequest("/", os.DirFS("/"))
+ scanResult := scanner.PerformScan(ctx, scanRequest, scanner.GetCollector(scanRequest.Collector()))
+ if scanResult.Error != nil {
+ return scanResult.Error
+ }
+
+ cycloneDX, err := scanResult.Report.ToCycloneDX()
+ if err != nil {
+ return err
+ }
+
+ jsonContent, err := json.MarshalIndent(cycloneDX, "", " ")
+ if err != nil {
+ return err
+ }
+
+ return fb.AddFile("host-sbom.json", jsonContent)
+}
+
// AddEventReceiver adds a callback to the collector to be called each time a check is added or removed.
func (c *collectorImpl) AddEventReceiver(cb collector.EventReceiver) {
c.m.Lock()
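
The fillFlare hook above plugs into the flare provider system referenced in comp/core/flare. A trimmed-down sketch of that shape, with FlareBuilder reduced to the single method used here and the SBOM payload stubbed out:

    package main

    import "fmt"

    // FlareBuilder is a trimmed stand-in for the real
    // comp/core/flare/types.FlareBuilder interface.
    type FlareBuilder interface {
    	AddFile(path string, content []byte) error
    }

    type memBuilder struct{ files map[string][]byte }

    func (m *memBuilder) AddFile(path string, content []byte) error {
    	m.files[path] = content
    	return nil
    }

    // fillFlare mirrors the structure of the new collector hook: gather some
    // data, serialize it, and attach it to the flare under a fixed file name.
    func fillFlare(fb FlareBuilder) error {
    	report := []byte(`{"components": []}`) // placeholder for the CycloneDX SBOM
    	return fb.AddFile("host-sbom.json", report)
    }

    func main() {
    	fb := &memBuilder{files: map[string][]byte{}}
    	if err := fillFlare(fb); err != nil {
    		fmt.Println("flare provider failed:", err)
    		return
    	}
    	fmt.Println(string(fb.files["host-sbom.json"]))
    }
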
diff --git a/comp/core/agenttelemetry/impl/config.go b/comp/core/agenttelemetry/impl/config.go
index a33a440162119..3109a400b573c 100644
--- a/comp/core/agenttelemetry/impl/config.go
+++ b/comp/core/agenttelemetry/impl/config.go
@@ -237,7 +237,7 @@ var defaultProfiles = `
- check_name:network
- check_name:io
- check_name:file_handle
- metrics:
+ metrics:
- name: checks.runs
aggregate_tags:
- check_name
@@ -257,6 +257,10 @@ var defaultProfiles = `
- name: logs.destination_http_resp
aggregate_tags:
- status_code
+ - name: oracle.activity_samples_count
+ - name: oracle.activity_latency
+ - name: oracle.statement_metrics
+ - name: oracle.statement_plan_errors
- name: transactions.input_count
- name: transactions.requeued
- name: transactions.retries
diff --git a/comp/core/autodiscovery/common/utils/pod_annotations.go b/comp/core/autodiscovery/common/utils/pod_annotations.go
index d498245beb39f..2db5b0c323305 100644
--- a/comp/core/autodiscovery/common/utils/pod_annotations.go
+++ b/comp/core/autodiscovery/common/utils/pod_annotations.go
@@ -58,6 +58,7 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf
Name string `json:"name"`
InitConfig json.RawMessage `json:"init_config"`
Instances []interface{} `json:"instances"`
+ Logs json.RawMessage `json:"logs"`
IgnoreAutodiscoveryTags bool `json:"ignore_autodiscovery_tags"`
}
@@ -83,6 +84,9 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf
IgnoreAutodiscoveryTags: config.IgnoreAutodiscoveryTags,
}
+ if len(config.Logs) > 0 {
+ c.LogsConfig = integration.Data(config.Logs)
+ }
for _, i := range config.Instances {
instance, err := parseJSONObjToData(i)
if err != nil {
diff --git a/comp/core/autodiscovery/common/utils/pod_annotations_test.go b/comp/core/autodiscovery/common/utils/pod_annotations_test.go
index a583e94c422ed..7043e7c19e301 100644
--- a/comp/core/autodiscovery/common/utils/pod_annotations_test.go
+++ b/comp/core/autodiscovery/common/utils/pod_annotations_test.go
@@ -428,6 +428,25 @@ func TestExtractTemplatesFromAnnotations(t *testing.T) {
},
},
},
+ {
+ name: "v2 annotations label logs",
+ annotations: map[string]string{
+ "ad.datadoghq.com/foobar.checks": `{
+ "apache": {
+ "logs": [{"service":"any_service","source":"any_source"}]
+ }
+ }`,
+ },
+ adIdentifier: "foobar",
+ output: []integration.Config{
+ {
+ Name: "apache",
+ LogsConfig: integration.Data("[{\"service\":\"any_service\",\"source\":\"any_source\"}]"),
+ ADIdentifiers: []string{adID},
+ InitConfig: integration.Data("{}"),
+ },
+ },
+ },
}
for _, tt := range tests {
diff --git a/comp/core/flare/flare.go b/comp/core/flare/flare.go
index 2b0e249774101..6340f1b2f4934 100644
--- a/comp/core/flare/flare.go
+++ b/comp/core/flare/flare.go
@@ -176,6 +176,9 @@ func (f *flare) Create(pdata ProfileData, ipcError error) (string, error) {
}
// Adding legacy and internal providers. Registering them as Provider through FX creates cyclic dependencies.
+ //
+ // Do not extend this list; this is legacy behavior that should be removed at some point. To add data to a flare,
+ // use the flare provider system: https://datadoghq.dev/datadog-agent/components/shared_features/flares/
providers := append(
f.providers,
func(fb types.FlareBuilder) error {
diff --git a/comp/core/log/fx-systemprobe/fx.go b/comp/core/log/fx-systemprobe/fx.go
index 48e883d4ef019..0b948e74336ee 100644
--- a/comp/core/log/fx-systemprobe/fx.go
+++ b/comp/core/log/fx-systemprobe/fx.go
@@ -7,8 +7,10 @@
package fx
import (
+ logdef "github.com/DataDog/datadog-agent/comp/core/log/def"
logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl-systemprobe"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
)
// Module defines the fx options for this component
@@ -17,5 +19,6 @@ func Module() fxutil.Module {
fxutil.ProvideComponentConstructor(
logimpl.NewComponent,
),
+ logging.NewFxEventLoggerOption[logdef.Component](),
)
}
diff --git a/comp/core/log/fx-trace/fx.go b/comp/core/log/fx-trace/fx.go
index 91dc0fc968e93..ebfd4d30d75d6 100644
--- a/comp/core/log/fx-trace/fx.go
+++ b/comp/core/log/fx-trace/fx.go
@@ -7,8 +7,10 @@
package fx
import (
+ logdef "github.com/DataDog/datadog-agent/comp/core/log/def"
impltrace "github.com/DataDog/datadog-agent/comp/core/log/impl-trace"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
)
// Module defines the fx options for this component
@@ -17,5 +19,6 @@ func Module() fxutil.Module {
fxutil.ProvideComponentConstructor(
impltrace.NewComponent,
),
+ logging.NewFxEventLoggerOption[logdef.Component](),
)
}
diff --git a/comp/core/log/fx/fx.go b/comp/core/log/fx/fx.go
index 17bc03ca3a8e2..27b07266f1709 100644
--- a/comp/core/log/fx/fx.go
+++ b/comp/core/log/fx/fx.go
@@ -7,8 +7,10 @@
package fx
import (
+ logdef "github.com/DataDog/datadog-agent/comp/core/log/def"
logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
)
// Module defines the fx options for this component
@@ -17,5 +19,6 @@ func Module() fxutil.Module {
fxutil.ProvideComponentConstructor(
logimpl.NewComponent,
),
+ logging.NewFxEventLoggerOption[logdef.Component](),
)
}
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go
index d7753c7f495f2..9dfa487f002f4 100644
--- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go
+++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go
@@ -462,6 +462,10 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta
addResourceTags(taskTags, task.Tags)
}
+ if task.ServiceName != "" {
+ taskTags.AddLow(tags.EcsServiceName, strings.ToLower(task.ServiceName))
+ }
+
tagInfos := make([]*types.TagInfo, 0, len(task.Containers))
for _, taskContainer := range task.Containers {
container, err := c.store.GetContainer(taskContainer.ID)
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go
index 63c2a66adc4af..c2ab0bb1714d8 100644
--- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go
+++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go
@@ -1426,6 +1426,7 @@ func TestHandleECSTask(t *testing.T) {
Name: containerName,
},
},
+ ServiceName: "datadog-agent-service",
},
expected: []*types.TagInfo{
{
@@ -1444,6 +1445,7 @@ func TestHandleECSTask(t *testing.T) {
"task_family:datadog-agent",
"task_name:datadog-agent",
"task_version:1",
+ "ecs_service:datadog-agent-service",
},
StandardTags: []string{},
},
diff --git a/comp/core/tagger/taggerimpl/generic_store/composite_store.go b/comp/core/tagger/taggerimpl/generic_store/composite_store.go
index 13215d6fea371..1a1b7306265c7 100644
--- a/comp/core/tagger/taggerimpl/generic_store/composite_store.go
+++ b/comp/core/tagger/taggerimpl/generic_store/composite_store.go
@@ -5,7 +5,9 @@
package genericstore
-import "github.com/DataDog/datadog-agent/comp/core/tagger/types"
+import (
+ "github.com/DataDog/datadog-agent/comp/core/tagger/types"
+)
type compositeObjectStore[T any] struct {
data map[types.EntityIDPrefix]map[string]T
@@ -64,10 +66,11 @@ func (os *compositeObjectStore[T]) Size() int {
}
// ListObjects implements ObjectStore#ListObjects
-func (os *compositeObjectStore[T]) ListObjects() []T {
+func (os *compositeObjectStore[T]) ListObjects(filter *types.Filter) []T {
objects := make([]T, 0, os.Size())
- for _, idToObjects := range os.data {
+ for prefix := range filter.GetPrefixes() {
+ idToObjects := os.data[prefix]
for _, object := range idToObjects {
objects = append(objects, object)
}
@@ -77,8 +80,9 @@ func (os *compositeObjectStore[T]) ListObjects() []T {
}
// ForEach implements ObjectStore#ForEach
-func (os *compositeObjectStore[T]) ForEach(apply types.ApplyFunc[T]) {
- for prefix, idToObjects := range os.data {
+func (os *compositeObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) {
+ for prefix := range filter.GetPrefixes() {
+ idToObjects := os.data[prefix]
for id, object := range idToObjects {
apply(types.NewEntityID(prefix, id), object)
}
diff --git a/comp/core/tagger/taggerimpl/generic_store/default_store.go b/comp/core/tagger/taggerimpl/generic_store/default_store.go
index d0112b8c0c8eb..66e383de8a1c2 100644
--- a/comp/core/tagger/taggerimpl/generic_store/default_store.go
+++ b/comp/core/tagger/taggerimpl/generic_store/default_store.go
@@ -35,19 +35,35 @@ func (os defaulObjectStore[T]) Size() int {
}
// ListObjects implements ObjectStore#ListObjects
-func (os defaulObjectStore[T]) ListObjects() []T {
+func (os defaulObjectStore[T]) ListObjects(filter *types.Filter) []T {
objects := make([]T, 0)
- for _, object := range os {
- objects = append(objects, object)
+ if filter == nil {
+ for _, object := range os {
+ objects = append(objects, object)
+ }
+ } else {
+ for entityID, object := range os {
+ if filter.MatchesPrefix(entityID.GetPrefix()) {
+ objects = append(objects, object)
+ }
+ }
}
return objects
}
// ForEach implements ObjectStore#ForEach
-func (os defaulObjectStore[T]) ForEach(apply types.ApplyFunc[T]) {
- for id, object := range os {
- apply(id, object)
+func (os defaulObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) {
+ if filter == nil {
+ for id, object := range os {
+ apply(id, object)
+ }
+ } else {
+ for id, object := range os {
+ if filter.MatchesPrefix(id.GetPrefix()) {
+ apply(id, object)
+ }
+ }
}
}
diff --git a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go
index 13715c69de459..0a07b142ee1b3 100644
--- a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go
+++ b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go
@@ -14,7 +14,7 @@ import (
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
)
-const samples int = 1000000
+const samples int = 10000000
var weightedPrefixes = map[string]int{
"container_image_metadata": 60,
@@ -24,7 +24,7 @@ var weightedPrefixes = map[string]int{
"deployment": 15,
"kubernetes_metadata": 30,
"kubernetes_pod_uid": 30,
- "process": 30,
+ "process": 60,
}
// getWeightedPrefix selects a prefix based on the provided weights.
@@ -55,6 +55,19 @@ func initStore(store types.ObjectStore[int]) {
}
}
+func initFilter() *types.Filter {
+ fb := types.NewFilterBuilder()
+
+ numberOfPrefixes := rand.Intn(len(weightedPrefixes))
+
+ for range numberOfPrefixes {
+ prefix := getNextPrefix()
+ fb.Include(prefix)
+ }
+
+ return fb.Build(types.HighCardinality)
+}
+
// Mock ApplyFunc for testing purposes
func mockApplyFunc[T any](_ types.EntityID, _ T) {}
@@ -166,7 +179,10 @@ func BenchmarkDefaultObjectStore_ForEach(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- store.ForEach(mockApplyFunc[int])
+ b.StopTimer()
+ filter := initFilter()
+ b.StartTimer()
+ store.ForEach(filter, mockApplyFunc[int])
}
}
@@ -178,11 +194,14 @@ func BenchmarkCompositeObjectStore_ForEach(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- store.ForEach(mockApplyFunc[int])
+ b.StopTimer()
+ filter := initFilter()
+ b.StartTimer()
+ store.ForEach(filter, mockApplyFunc[int])
}
}
-func BenchmarkDefaultObjectStore_ListAll(b *testing.B) {
+func BenchmarkDefaultObjectStore_ListObjects(b *testing.B) {
cfg := configmock.New(b)
cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false)
store := NewObjectStore[int](cfg)
@@ -191,11 +210,14 @@ func BenchmarkDefaultObjectStore_ListAll(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _ = store.ListObjects()
+ b.StopTimer()
+ filter := initFilter()
+ b.StartTimer()
+ _ = store.ListObjects(filter)
}
}
-func BenchmarkCompositeObjectStore_ListAll(b *testing.B) {
+func BenchmarkCompositeObjectStore_ListObjects(b *testing.B) {
cfg := configmock.New(b)
cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true)
store := NewObjectStore[int](cfg)
@@ -204,6 +226,9 @@ func BenchmarkCompositeObjectStore_ListAll(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _ = store.ListObjects()
+ b.StopTimer()
+ filter := initFilter()
+ b.StartTimer()
+ _ = store.ListObjects(filter)
}
}
diff --git a/comp/core/tagger/taggerimpl/generic_store/store_test.go b/comp/core/tagger/taggerimpl/generic_store/store_test.go
index 0a72a87fecb2a..48a3331d181ff 100644
--- a/comp/core/tagger/taggerimpl/generic_store/store_test.go
+++ b/comp/core/tagger/taggerimpl/generic_store/store_test.go
@@ -114,20 +114,26 @@ func TestObjectStore_ListObjects(t *testing.T) {
cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite)
store := NewObjectStore[any](cfg)
+ // build some filter
+ fb := types.NewFilterBuilder()
+ fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2"))
+ filter := fb.Build(types.HighCardinality)
+
// list should return empty
- list := store.ListObjects()
+ list := store.ListObjects(filter)
assert.Equalf(t, len(list), 0, "ListObjects should return an empty list")
// add some items
ids := []string{"prefix1://id1", "prefix2://id2", "prefix3://id3", "prefix4://id4"}
for _, id := range ids {
entityID, _ := types.NewEntityIDFromString(id)
- store.Set(entityID, struct{}{})
+ store.Set(entityID, id)
}
// list should now contain only the entities matching the filter
- list = store.ListObjects()
- assert.Equalf(t, len(list), len(ids), "ListObjects should return a list of size %d", len(ids))
+ list = store.ListObjects(filter)
+ expectedListing := []any{"prefix1://id1", "prefix2://id2"}
+ assert.ElementsMatch(t, expectedListing, list)
}
// default store
@@ -152,10 +158,16 @@ func TestObjectStore_ForEach(t *testing.T) {
}
accumulator := []string{}
- store.ForEach(func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) })
+
+ // build some filter
+ fb := types.NewFilterBuilder()
+ fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2"))
+ filter := fb.Build(types.HighCardinality)
+
+ store.ForEach(filter, func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) })
// only entities matching the filter should have been accumulated
- assert.ElementsMatch(t, accumulator, ids)
+ assert.ElementsMatch(t, accumulator, []string{"prefix1://id1", "prefix2://id2"})
}
// default store
diff --git a/comp/core/tagger/taggerimpl/remote/tagstore.go b/comp/core/tagger/taggerimpl/remote/tagstore.go
index 5f3d12de8eac4..3ac87a5151d66 100644
--- a/comp/core/tagger/taggerimpl/remote/tagstore.go
+++ b/comp/core/tagger/taggerimpl/remote/tagstore.go
@@ -81,7 +81,7 @@ func (s *tagStore) getEntity(entityID types.EntityID) *types.Entity {
func (s *tagStore) listEntities() []*types.Entity {
s.mutex.RLock()
defer s.mutex.RUnlock()
- return s.store.ListObjects()
+ return s.store.ListObjects(types.NewMatchAllFilter())
}
func (s *tagStore) collectTelemetry() {
@@ -93,7 +93,7 @@ func (s *tagStore) collectTelemetry() {
s.mutex.Lock()
defer s.mutex.Unlock()
- s.store.ForEach(func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ })
+ s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ })
for prefix, storedEntities := range s.telemetry {
s.telemetryStore.StoredEntities.Set(storedEntities, remoteSource, prefix)
@@ -107,7 +107,7 @@ func (s *tagStore) subscribe(cardinality types.TagCardinality) chan []types.Enti
events := make([]types.EntityEvent, 0, s.store.Size())
- s.store.ForEach(func(_ types.EntityID, e *types.Entity) {
+ s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) {
events = append(events, types.EntityEvent{
EventType: types.EventTypeAdded,
Entity: *e,
@@ -138,7 +138,7 @@ func (s *tagStore) reset() {
events := make([]types.EntityEvent, 0, s.store.Size())
- s.store.ForEach(func(_ types.EntityID, e *types.Entity) {
+ s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) {
events = append(events, types.EntityEvent{
EventType: types.EventTypeDeleted,
Entity: types.Entity{ID: e.ID},
diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore.go b/comp/core/tagger/taggerimpl/tagstore/tagstore.go
index ddd07cb17252b..020128c06b2df 100644
--- a/comp/core/tagger/taggerimpl/tagstore/tagstore.go
+++ b/comp/core/tagger/taggerimpl/tagstore/tagstore.go
@@ -163,7 +163,7 @@ func (s *TagStore) collectTelemetry() {
s.Lock()
defer s.Unlock()
- s.store.ForEach(func(_ types.EntityID, et EntityTags) {
+ s.store.ForEach(nil, func(_ types.EntityID, et EntityTags) {
prefix := string(et.getEntityID().GetPrefix())
for _, source := range et.sources() {
@@ -192,7 +192,7 @@ func (s *TagStore) Subscribe(cardinality types.TagCardinality) chan []types.Enti
events := make([]types.EntityEvent, 0, s.store.Size())
- s.store.ForEach(func(_ types.EntityID, et EntityTags) {
+ s.store.ForEach(nil, func(_ types.EntityID, et EntityTags) {
events = append(events, types.EntityEvent{
EventType: types.EventTypeAdded,
Entity: et.toEntity(),
@@ -220,7 +220,7 @@ func (s *TagStore) Prune() {
now := s.clock.Now()
events := []types.EntityEvent{}
- s.store.ForEach(func(eid types.EntityID, et EntityTags) {
+ s.store.ForEach(nil, func(eid types.EntityID, et EntityTags) {
changed := et.deleteExpired(now)
if !changed && !et.shouldRemove() {
@@ -283,7 +283,7 @@ func (s *TagStore) List() types.TaggerListResponse {
s.RLock()
defer s.RUnlock()
- for _, et := range s.store.ListObjects() {
+ for _, et := range s.store.ListObjects(types.NewMatchAllFilter()) {
r.Entities[et.getEntityID().String()] = types.TaggerListEntity{
Tags: et.tagsBySource(),
}
diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go
index 9e64b8f314e0b..eb2e21ac3ee01 100644
--- a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go
+++ b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go
@@ -6,7 +6,6 @@
package tagstore
import (
- "fmt"
"sync"
"testing"
"time"
@@ -39,7 +38,6 @@ func (s *StoreTestSuite) SetupTest() {
s.clock.Add(time.Since(time.Unix(0, 0)))
mockConfig := configmock.New(s.T())
- fmt.Println("New Checkpoint: ", mockConfig)
s.tagstore = newTagStoreWithClock(mockConfig, s.clock, telemetryStore)
}
diff --git a/comp/core/tagger/tags/tags.go b/comp/core/tagger/tags/tags.go
index 2307447fbe789..fa24321003010 100644
--- a/comp/core/tagger/tags/tags.go
+++ b/comp/core/tagger/tags/tags.go
@@ -108,6 +108,8 @@ const (
EcsContainerName = "ecs_container_name"
// EcsClusterName is the tag for the ECS cluster name
EcsClusterName = "ecs_cluster_name"
+ // EcsServiceName is the tag for the ECS service name
+ EcsServiceName = "ecs_service"
// Language is the tag for the process language
Language = "language"
diff --git a/comp/core/tagger/types/entity_id.go b/comp/core/tagger/types/entity_id.go
index 62e44eb75f2f9..e049d2825c799 100644
--- a/comp/core/tagger/types/entity_id.go
+++ b/comp/core/tagger/types/entity_id.go
@@ -129,3 +129,17 @@ const (
// Process is the prefix `process`
Process EntityIDPrefix = "process"
)
+
+// AllPrefixesSet returns a set of all possible entity id prefixes that can be used in the tagger
+func AllPrefixesSet() map[EntityIDPrefix]struct{} {
+ return map[EntityIDPrefix]struct{}{
+ ContainerID: {},
+ ContainerImageMetadata: {},
+ ECSTask: {},
+ Host: {},
+ KubernetesDeployment: {},
+ KubernetesMetadata: {},
+ KubernetesPodUID: {},
+ Process: {},
+ }
+}
diff --git a/comp/core/tagger/types/filter_builder.go b/comp/core/tagger/types/filter_builder.go
new file mode 100644
index 0000000000000..233b495d168a5
--- /dev/null
+++ b/comp/core/tagger/types/filter_builder.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package types
+
+import (
+ "maps"
+)
+
+// FilterBuilder builds a tagger subscriber filter based on include/exclude rules
+type FilterBuilder struct {
+ prefixesToInclude map[EntityIDPrefix]struct{}
+
+ prefixesToExclude map[EntityIDPrefix]struct{}
+}
+
+// NewFilterBuilder returns a new empty filter builder
+func NewFilterBuilder() *FilterBuilder {
+ return &FilterBuilder{
+ prefixesToInclude: make(map[EntityIDPrefix]struct{}),
+ prefixesToExclude: make(map[EntityIDPrefix]struct{}),
+ }
+}
+
+// Include includes the specified prefixes in the filter
+func (fb *FilterBuilder) Include(prefixes ...EntityIDPrefix) *FilterBuilder {
+ if fb == nil {
+ panic("filter builder should not be nil")
+ }
+
+ for _, prefix := range prefixes {
+ fb.prefixesToInclude[prefix] = struct{}{}
+ }
+
+ return fb
+}
+
+// Exclude excludes the specified prefixes from the filter
+func (fb *FilterBuilder) Exclude(prefixes ...EntityIDPrefix) *FilterBuilder {
+ if fb == nil {
+ panic("filter builder should not be nil")
+ }
+
+ for _, prefix := range prefixes {
+ fb.prefixesToExclude[prefix] = struct{}{}
+ }
+
+ return fb
+}
+
+// Build builds a new Filter object based on the calls to Include and Exclude
+// If the builder only excludes prefixes, the created filter will match any prefix except for the excluded ones.
+// If the builder only includes prefixes, the created filter will match only the prefixes included in the builder.
+// If the builder includes prefixes and excludes prefixes, the created filter will match only prefixes that are included but not excluded in the builder.
+// If the builder has neither included nor excluded prefixes, it will match all prefixes by default.
+func (fb *FilterBuilder) Build(card TagCardinality) *Filter {
+ if fb == nil {
+ panic("filter builder should not be nil")
+ }
+
+ if len(fb.prefixesToInclude)+len(fb.prefixesToExclude) == 0 {
+ return newFilter(AllPrefixesSet(), card)
+ }
+
+ var prefixSet map[EntityIDPrefix]struct{}
+
+ // initialise prefixSet with what should be included
+ if len(fb.prefixesToInclude) == 0 {
+ prefixSet = maps.Clone(AllPrefixesSet())
+ } else {
+ prefixSet = maps.Clone(fb.prefixesToInclude)
+ }
+
+ // exclude unwanted prefixes
+ for prefix := range fb.prefixesToExclude {
+ delete(prefixSet, prefix)
+ }
+
+ return newFilter(prefixSet, card)
+}
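
A usage sketch of the builder defined above, compiled against the comp/core/tagger/types package added in this patch; per the Build rules documented in the comment, exclusion wins over inclusion:

    package main

    import (
    	"fmt"

    	"github.com/DataDog/datadog-agent/comp/core/tagger/types"
    )

    func main() {
    	// Include container and host entities, then exclude hosts again:
    	// the resulting filter matches only container IDs.
    	filter := types.NewFilterBuilder().
    		Include(types.ContainerID, types.Host).
    		Exclude(types.Host).
    		Build(types.OrchestratorCardinality)

    	fmt.Println(filter.MatchesPrefix(types.ContainerID)) // true
    	fmt.Println(filter.MatchesPrefix(types.Host))        // false: excluded
    	fmt.Println(filter.GetCardinality() == types.OrchestratorCardinality) // true
    }
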
diff --git a/comp/core/tagger/types/filter_builder_test.go b/comp/core/tagger/types/filter_builder_test.go
new file mode 100644
index 0000000000000..73d5424103fae
--- /dev/null
+++ b/comp/core/tagger/types/filter_builder_test.go
@@ -0,0 +1,99 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package types
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFilterBuilderOps(t *testing.T) {
+ tests := []struct {
+ name string
+ do func(*FilterBuilder)
+ buildCard TagCardinality
+ expectBuildFilter Filter
+ }{
+ {
+ name: "do nothing",
+ do: func(_ *FilterBuilder) {},
+ buildCard: HighCardinality,
+ expectBuildFilter: Filter{
+ prefixes: AllPrefixesSet(),
+ cardinality: HighCardinality,
+ },
+ },
+ {
+ name: "only includes",
+ do: func(fb *FilterBuilder) {
+ fb.Include(KubernetesDeployment, ContainerID)
+ fb.Include(Host)
+ },
+ buildCard: HighCardinality,
+ expectBuildFilter: Filter{
+ prefixes: map[EntityIDPrefix]struct{}{
+ KubernetesDeployment: {},
+ ContainerID: {},
+ Host: {},
+ },
+ cardinality: HighCardinality,
+ },
+ },
+ {
+ name: "only excludes",
+ do: func(fb *FilterBuilder) {
+ fb.Exclude(KubernetesDeployment, ContainerID)
+ fb.Exclude(Host)
+ },
+ buildCard: HighCardinality,
+ expectBuildFilter: Filter{
+ prefixes: map[EntityIDPrefix]struct{}{
+ ContainerImageMetadata: {},
+ ECSTask: {},
+ KubernetesMetadata: {},
+ KubernetesPodUID: {},
+ Process: {},
+ },
+ cardinality: HighCardinality,
+ },
+ },
+ {
+ name: "both includes and excludes",
+ do: func(fb *FilterBuilder) {
+ fb.Include(ContainerImageMetadata)
+ fb.Exclude(KubernetesDeployment, ContainerID)
+ fb.Include(ContainerID)
+ fb.Exclude(Host, KubernetesMetadata)
+ fb.Include(Host, Process)
+ },
+ buildCard: HighCardinality,
+ expectBuildFilter: Filter{
+ prefixes: map[EntityIDPrefix]struct{}{
+ ContainerImageMetadata: {},
+ Process: {},
+ },
+ cardinality: HighCardinality,
+ },
+ },
+ }
+
+ for _, test := range tests {
+ fb := NewFilterBuilder()
+ test.do(fb)
+ filter := fb.Build(test.buildCard)
+ assert.Truef(t, reflect.DeepEqual(*filter, test.expectBuildFilter), "expected %v, found %v", test.expectBuildFilter, filter)
+ }
+}
+
+func TestNilFilterBuilderOps(t *testing.T) {
+ var fb *FilterBuilder
+
+ assert.Panics(t, func() { fb.Include(ContainerID) })
+ assert.Panics(t, func() { fb.Exclude(ContainerID) })
+ assert.Panics(t, func() { fb.Build(HighCardinality) })
+}
diff --git a/comp/core/tagger/types/filters.go b/comp/core/tagger/types/filters.go
new file mode 100644
index 0000000000000..436b334e3bff2
--- /dev/null
+++ b/comp/core/tagger/types/filters.go
@@ -0,0 +1,60 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package types
+
+import (
+ "maps"
+)
+
+// NewMatchAllFilter returns a filter that matches any prefix
+func NewMatchAllFilter() *Filter {
+ return nil
+}
+
+// Filter represents a subscription filter for the tagger
+type Filter struct {
+ prefixes map[EntityIDPrefix]struct{}
+ cardinality TagCardinality
+}
+
+func newFilter(prefixes map[EntityIDPrefix]struct{}, cardinality TagCardinality) *Filter {
+ return &Filter{
+ prefixes: maps.Clone(prefixes),
+ cardinality: cardinality,
+ }
+}
+
+// GetPrefixes returns the prefix set of the filter
+// If the filter is nil, a set containing all possible prefixes is returned
+func (f *Filter) GetPrefixes() map[EntityIDPrefix]struct{} {
+ if f == nil {
+ return AllPrefixesSet()
+ }
+
+ return maps.Clone(f.prefixes)
+}
+
+// GetCardinality returns the filter cardinality
+// If the filter is nil, High cardinality is returned
+func (f *Filter) GetCardinality() TagCardinality {
+ if f == nil {
+ return HighCardinality
+ }
+
+ return f.cardinality
+}
+
+// MatchesPrefix returns whether the filter matches the given prefix
+func (f *Filter) MatchesPrefix(prefix EntityIDPrefix) bool {
+ // A nil filter should match everything
+ if f == nil {
+ return true
+ }
+
+ _, found := f.prefixes[prefix]
+
+ return found
+}
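+
+// Illustrative sketch of the nil-filter semantics documented above:
+//
+//	f := NewMatchAllFilter()         // returns a nil *Filter
+//	_ = f.MatchesPrefix(ContainerID) // true: a nil filter matches every prefix
+//	_ = f.GetCardinality()           // HighCardinality
+//	_ = f.GetPrefixes()              // AllPrefixesSet()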
diff --git a/comp/core/tagger/types/filters_test.go b/comp/core/tagger/types/filters_test.go
new file mode 100644
index 0000000000000..97827d218a069
--- /dev/null
+++ b/comp/core/tagger/types/filters_test.go
@@ -0,0 +1,45 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package types
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFilterOps(t *testing.T) {
+ f := Filter{
+ prefixes: map[EntityIDPrefix]struct{}{
+ KubernetesDeployment: {},
+ KubernetesPodUID: {},
+ },
+ cardinality: OrchestratorCardinality,
+ }
+
+ // assert cardinality is correct
+ cardinality := f.GetCardinality()
+ assert.Equal(t, OrchestratorCardinality, cardinality)
+
+ // assert GetPrefixes
+ expectedPrefixes := map[EntityIDPrefix]struct{}{
+ KubernetesDeployment: {},
+ KubernetesPodUID: {},
+ }
+ assert.Truef(t, reflect.DeepEqual(expectedPrefixes, f.GetPrefixes()), "expected %v, found %v", expectedPrefixes, f.GetPrefixes())
+}
+
+func TestNilFilter(t *testing.T) {
+ var f *Filter
+
+ assert.Truef(t, reflect.DeepEqual(f.GetPrefixes(), AllPrefixesSet()), "expected %v, found %v", AllPrefixesSet(), f.GetPrefixes())
+ assert.Equalf(t, HighCardinality, f.GetCardinality(), "nil filter should have cardinality HIGH, found %v", f.GetCardinality())
+
+ for prefix := range AllPrefixesSet() {
+ assert.Truef(t, f.MatchesPrefix(prefix), "nil filter should match any prefix, didn't match %v", prefix)
+ }
+}
diff --git a/comp/core/tagger/types/go.mod b/comp/core/tagger/types/go.mod
new file mode 100644
index 0000000000000..5abfecabc88c2
--- /dev/null
+++ b/comp/core/tagger/types/go.mod
@@ -0,0 +1,88 @@
+module github.com/DataDog/datadog-agent/comp/core/tagger/types
+
+go 1.22.0
+
+replace (
+ github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def
+ github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../flare/builder
+ github.com/DataDog/datadog-agent/comp/core/flare/types => ../../flare/types
+ github.com/DataDog/datadog-agent/comp/core/secrets => ../../secrets
+ github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../utils
+ github.com/DataDog/datadog-agent/comp/core/telemetry => ../../telemetry
+ github.com/DataDog/datadog-agent/comp/def => ../../../def
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults
+ github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env
+ github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model
+ github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup
+ github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable
+ github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem
+ github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate
+ github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log
+ github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional
+ github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer
+ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber
+ github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system
+ github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket
+ github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../pkg/util/tagger
+ github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil
+)
+
+require (
+ github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2
+ github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2
+ github.com/stretchr/testify v1.9.0
+)
+
+require (
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect
+ github.com/DataDog/viper v1.13.5 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
+ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/magiconair/properties v1.8.1 // indirect
+ github.com/mitchellh/mapstructure v1.1.2 // indirect
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
+ github.com/pelletier/go-toml v1.2.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+ github.com/shoenig/go-m1cpu v0.1.6 // indirect
+ github.com/spf13/afero v1.1.2 // indirect
+ github.com/spf13/cast v1.5.1 // indirect
+ github.com/spf13/jwalterweatherman v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
+ golang.org/x/mod v0.20.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
+ golang.org/x/tools v0.24.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/comp/core/tagger/types/go.sum b/comp/core/tagger/types/go.sum
new file mode 100644
index 0000000000000..77ba213060c82
--- /dev/null
+++ b/comp/core/tagger/types/go.sum
@@ -0,0 +1,352 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64=
+github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
+github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
+github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI=
+go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw=
+go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
+golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/comp/core/tagger/types/types.go b/comp/core/tagger/types/types.go
index e9e16653cf9aa..f39cdf375b71f 100644
--- a/comp/core/tagger/types/types.go
+++ b/comp/core/tagger/types/types.go
@@ -28,10 +28,10 @@ type ObjectStore[V any] interface {
Unset(EntityID)
// Size returns the total number of objects in the store
Size() int
- // ListObjects returns a slice containing all objects of the store
- ListObjects() []V
- // ForEach applies a given function to each object in the store
- ForEach(ApplyFunc[V])
+ // ListObjects returns a slice containing objects of the store matching the filter
+ ListObjects(*Filter) []V
+ // ForEach applies a given function to each object in the store matching the filter
+ ForEach(*Filter, ApplyFunc[V])
}
// TaggerListResponse holds the tagger list response
diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go
index 9769bbaeb2ab0..64cc62cd636df 100644
--- a/comp/forwarder/defaultforwarder/forwarder.go
+++ b/comp/forwarder/defaultforwarder/forwarder.go
@@ -33,16 +33,23 @@ type provides struct {
}
func newForwarder(dep dependencies) provides {
+ if dep.Params.useNoopForwarder {
+ return provides{
+ Comp: NoopForwarder{},
+ }
+ }
+
options := createOptions(dep.Params, dep.Config, dep.Log)
- return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options, dep.Params.useNoopForwarder)
+
+ return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options)
}
func createOptions(params Params, config config.Component, log log.Component) *Options {
var options *Options
+ keysPerDomain := getMultipleEndpoints(config, log)
if !params.withResolver {
- options = NewOptions(config, log, getMultipleEndpoints(config, log))
+ options = NewOptions(config, log, keysPerDomain)
} else {
- keysPerDomain := getMultipleEndpoints(config, log)
options = NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(keysPerDomain))
}
// Override the DisableAPIKeyChecking only if WithFeatures was called
@@ -66,12 +73,7 @@ func getMultipleEndpoints(config config.Component, log log.Component) map[string
// NewForwarder returns a new forwarder component.
//
//nolint:revive
-func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options, useNoopForwarder bool) provides {
- if useNoopForwarder {
- return provides{
- Comp: NoopForwarder{},
- }
- }
+func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options) provides {
forwarder := NewDefaultForwarder(config, log, options)
lc.Append(fx.Hook{
diff --git a/comp/networkpath/npcollector/npcollectorimpl/config.go b/comp/networkpath/npcollector/npcollectorimpl/config.go
index c752153db7747..3a91df33ad08d 100644
--- a/comp/networkpath/npcollector/npcollectorimpl/config.go
+++ b/comp/networkpath/npcollector/npcollectorimpl/config.go
@@ -14,6 +14,8 @@ import (
type collectorConfigs struct {
connectionsMonitoringEnabled bool
workers int
+ timeout time.Duration
+ maxTTL int
pathtestInputChanSize int
pathtestProcessingChanSize int
pathtestContextsLimit int
@@ -24,9 +26,12 @@ type collectorConfigs struct {
}
func newConfig(agentConfig config.Component) *collectorConfigs {
return &collectorConfigs{
connectionsMonitoringEnabled: agentConfig.GetBool("network_path.connections_monitoring.enabled"),
workers: agentConfig.GetInt("network_path.collector.workers"),
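+		// the timeout value is read as a plain number of milliseconds; GetDuration
+		// yields that count in nanoseconds, hence the time.Millisecond scaling
+		// (assumed config convention)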
+ timeout: agentConfig.GetDuration("network_path.collector.timeout") * time.Millisecond,
+ maxTTL: agentConfig.GetInt("network_path.collector.max_ttl"),
pathtestInputChanSize: agentConfig.GetInt("network_path.collector.input_chan_size"),
pathtestProcessingChanSize: agentConfig.GetInt("network_path.collector.processing_chan_size"),
pathtestContextsLimit: agentConfig.GetInt("network_path.collector.pathtest_contexts_limit"),
diff --git a/comp/networkpath/npcollector/npcollectorimpl/config_test.go b/comp/networkpath/npcollector/npcollectorimpl/config_test.go
new file mode 100644
index 0000000000000..3be402965e8aa
--- /dev/null
+++ b/comp/networkpath/npcollector/npcollectorimpl/config_test.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package npcollectorimpl
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNetworkPathCollectorEnabled(t *testing.T) {
+ config := &collectorConfigs{
+ connectionsMonitoringEnabled: true,
+ }
+ assert.True(t, config.networkPathCollectorEnabled())
+
+ config.connectionsMonitoringEnabled = false
+ assert.False(t, config.networkPathCollectorEnabled())
+}
diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
index e35cdc9d05c51..b1f1e089ef8be 100644
--- a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
+++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
@@ -75,8 +75,10 @@ func newNoopNpCollectorImpl() *npCollectorImpl {
}
func newNpCollectorImpl(epForwarder eventplatform.Forwarder, collectorConfigs *collectorConfigs, logger log.Component, telemetrycomp telemetryComp.Component) *npCollectorImpl {
- logger.Infof("New NpCollector (workers=%d input_chan_size=%d processing_chan_size=%d pathtest_contexts_limit=%d pathtest_ttl=%s pathtest_interval=%s flush_interval=%s)",
+ logger.Infof("New NpCollector (workers=%d timeout=%d max_ttl=%d input_chan_size=%d processing_chan_size=%d pathtest_contexts_limit=%d pathtest_ttl=%s pathtest_interval=%s flush_interval=%s)",
collectorConfigs.workers,
+ collectorConfigs.timeout,
+ collectorConfigs.maxTTL,
collectorConfigs.pathtestInputChanSize,
collectorConfigs.pathtestProcessingChanSize,
collectorConfigs.pathtestContextsLimit,
@@ -118,8 +120,13 @@ func (s *npCollectorImpl) ScheduleConns(conns []*model.Connection) {
startTime := s.TimeNowFn()
for _, conn := range conns {
remoteAddr := conn.Raddr
- remotePort := uint16(conn.Raddr.GetPort())
protocol := convertProtocol(conn.GetType())
+ var remotePort uint16
+		// UDP traceroutes should not target the connection's active
+		// remote port, so remotePort is left as 0 for UDP
+ if protocol != payload.ProtocolUDP {
+ remotePort = uint16(conn.Raddr.GetPort())
+ }
if !shouldScheduleNetworkPathForConn(conn) {
s.logger.Tracef("Skipped connection: addr=%s, port=%d, protocol=%s", remoteAddr, remotePort, protocol)
continue
@@ -210,8 +217,8 @@ func (s *npCollectorImpl) runTracerouteForPath(ptest *pathteststore.PathtestCont
cfg := traceroute.Config{
DestHostname: ptest.Pathtest.Hostname,
DestPort: ptest.Pathtest.Port,
- MaxTTL: 0, // TODO: make it configurable, setting 0 to use default value for now
- TimeoutMs: 0, // TODO: make it configurable, setting 0 to use default value for now
+ MaxTTL: uint8(s.collectorConfigs.maxTTL),
+ Timeout: s.collectorConfigs.timeout,
Protocol: ptest.Pathtest.Protocol,
}
diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
index 9fe8542ab59d2..770e90ed5e188 100644
--- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
+++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
@@ -372,7 +372,7 @@ func Test_npCollectorImpl_ScheduleConns(t *testing.T) {
},
},
expectedPathtests: []*common.Pathtest{
- {Hostname: "10.0.0.6", Port: uint16(161), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"},
+ {Hostname: "10.0.0.6", Port: uint16(0), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"},
},
},
{
diff --git a/comp/otelcol/collector-contrib/impl/components.go b/comp/otelcol/collector-contrib/impl/components.go
index 28ac885df9fa7..9f27554af7d37 100644
--- a/comp/otelcol/collector-contrib/impl/components.go
+++ b/comp/otelcol/collector-contrib/impl/components.go
@@ -38,7 +38,6 @@ import (
"go.opentelemetry.io/collector/connector"
"go.opentelemetry.io/collector/exporter"
debugexporter "go.opentelemetry.io/collector/exporter/debugexporter"
- loggingexporter "go.opentelemetry.io/collector/exporter/loggingexporter"
nopexporter "go.opentelemetry.io/collector/exporter/nopexporter"
otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter"
otlphttpexporter "go.opentelemetry.io/collector/exporter/otlphttpexporter"
@@ -88,7 +87,6 @@ func components() (otelcol.Factories, error) {
factories.Exporters, err = exporter.MakeFactoryMap(
debugexporter.NewFactory(),
- loggingexporter.NewFactory(),
nopexporter.NewFactory(),
otlpexporter.NewFactory(),
otlphttpexporter.NewFactory(),
diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod
index 96fb8ef3d6823..6342ac0e8bca0 100644
--- a/comp/otelcol/collector-contrib/impl/go.mod
+++ b/comp/otelcol/collector-contrib/impl/go.mod
@@ -38,7 +38,6 @@ require (
go.opentelemetry.io/collector/connector v0.104.0
go.opentelemetry.io/collector/exporter v0.104.0
go.opentelemetry.io/collector/exporter/debugexporter v0.104.0
- go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0
diff --git a/comp/otelcol/collector-contrib/impl/go.sum b/comp/otelcol/collector-contrib/impl/go.sum
index b4814e7aa6f0c..9910d38ca9f68 100644
--- a/comp/otelcol/collector-contrib/impl/go.sum
+++ b/comp/otelcol/collector-contrib/impl/go.sum
@@ -928,8 +928,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL
go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ=
go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw=
go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8=
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY=
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo=
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ=
diff --git a/comp/otelcol/collector-contrib/impl/manifest.yaml b/comp/otelcol/collector-contrib/impl/manifest.yaml
index 092ea7cb47a3c..69f4208a1de98 100644
--- a/comp/otelcol/collector-contrib/impl/manifest.yaml
+++ b/comp/otelcol/collector-contrib/impl/manifest.yaml
@@ -18,7 +18,6 @@ extensions:
exporters:
- gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0
- - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0
diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler.go b/comp/otelcol/collector/impl-pipeline/flare_filler.go
index b574a81cb5af1..109429fb4bb9d 100644
--- a/comp/otelcol/collector/impl-pipeline/flare_filler.go
+++ b/comp/otelcol/collector/impl-pipeline/flare_filler.go
@@ -20,8 +20,6 @@ import (
"strings"
"time"
- "github.com/gocolly/colly/v2"
-
flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types"
extension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def"
"github.com/DataDog/datadog-agent/pkg/util/log"
@@ -106,48 +104,11 @@ func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error {
fb.AddFile(fmt.Sprintf("otel/otel-flare/%s.dat", name), data)
}
- if !src.Crawl {
- continue
- }
-
- // crawl the url by following any hyperlinks
- col := colly.NewCollector()
- col.OnHTML("a", func(e *colly.HTMLElement) {
- // visit all links
- link := e.Attr("href")
- if err := e.Request.Visit(e.Request.AbsoluteURL(link)); err != nil {
- filename := strings.ReplaceAll(url.PathEscape(link), ":", "_")
- fb.AddFile(fmt.Sprintf("otel/otel-flare/crawl-%s.err", filename), []byte(err.Error()))
- }
- })
- col.OnResponse(func(r *colly.Response) {
- // the root sources (from the extension.Response) were already fetched earlier
- // don't re-fetch them
- responseURL := r.Request.URL.String()
- if contains(sourceURLs, responseURL) {
- return
- }
- // use the url as the basis for the filename saved in the flare
- filename := strings.ReplaceAll(url.PathEscape(responseURL), ":", "_")
- fb.AddFile(fmt.Sprintf("otel/otel-flare/crawl-%s", filename), r.Body)
- })
- if err := col.Visit(sourceURL); err != nil {
- fb.AddFile("otel/otel-flare/crawl.err", []byte(err.Error()))
- }
}
}
return nil
}
-func contains(s []string, e string) bool {
- for _, a := range s {
- if a == e {
- return true
- }
- }
- return false
-}
-
func toJSON(it interface{}) string {
data, err := json.Marshal(it)
if err != nil {
diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
index fe23d57bbb773..ed98632b0f171 100644
--- a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
+++ b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
@@ -11,7 +11,6 @@ package collectorimpl
import (
"bytes"
- "fmt"
"io"
"net/http"
"net/http/httptest"
@@ -41,8 +40,7 @@ func createFakeOTelExtensionHTTPServer() (string, func()) {
io.WriteString(w, "data-source-2")
return
} else if r.URL.Path == "/three" {
-		pageTmpl := `Another source is <a href="%s">here</a>`
- io.WriteString(w, fmt.Sprintf(pageTmpl, testServerURL))
+ io.WriteString(w, "data-source-3")
return
} else if r.URL.Path == "/four" {
io.WriteString(w, "data-source-4")
@@ -93,27 +91,23 @@ func TestOTelExtFlareBuilder(t *testing.T) {
"prometheus": {
"url": [
"{{.url}}/one"
- ],
- "crawl": false
+ ]
},
"health_check": {
"url": [
"{{.url}}/two"
- ],
- "crawl": false
+ ]
},
"zpages": {
"url": [
"{{.url}}/three"
- ],
- "crawl": true
+ ]
},
"pprof": {
"url": [
"{{.url}}/four",
"{{.url}}/five/six"
- ],
- "crawl": false
+ ]
}
},
"environment": {{.environment}}
@@ -150,12 +144,9 @@ func TestOTelExtFlareBuilder(t *testing.T) {
f.AssertFileExists("otel", "otel-response.json")
- // Template for the crawable page
-	pageTmpl := `Another source is <a href="%s">here</a>`
-
f.AssertFileContent("data-source-1", "otel/otel-flare/prometheus_one.dat")
f.AssertFileContent("data-source-2", "otel/otel-flare/health_check_two.dat")
- f.AssertFileContent(fmt.Sprintf(pageTmpl, localServerURL), "otel/otel-flare/zpages_three.dat")
+ f.AssertFileContent("data-source-3", "otel/otel-flare/zpages_three.dat")
f.AssertFileContent("data-source-4", "otel/otel-flare/pprof_four.dat")
f.AssertFileContent("data-source-5-6", "otel/otel-flare/pprof_five_six.dat")
diff --git a/comp/otelcol/ddflareextension/README.md b/comp/otelcol/ddflareextension/README.md
new file mode 100644
index 0000000000000..79a9677e4207e
--- /dev/null
+++ b/comp/otelcol/ddflareextension/README.md
@@ -0,0 +1,64 @@
+# ddflare Extension
+
+The ddflare extension component allows inclusion of otel-agent data in the datadog-agent [flare](https://docs.datadoghq.com/agent/troubleshooting/send_a_flare/?tab=agent). A flare can be triggered by the core agent process or by remote config.
+
+The ddflare extension also provides the relevant metadata for otel-agent configuration and inventory tracking in Fleet Automation. This metadata is periodically collected by the core agent, which then submits it to the backend.
+
+![ddflare extension diagram](image/ddflareextensiondiagram.png)
+
+## Extension Configuration
+
+The datadog extension is added automatically by the [converter component](../converter/README.md). If you opted out of the converter, or if you want to change the defaults, you can configure the extension as follows:
+
+*Collector config:*
+```
+extensions:
+ datadog:
+ port: 7777
+```
+
+*Agent Config:*
+```
+otel-agent:
+ enabled: true
+ flare_port: 7777
+```
+
+The port is where the otel-agent exposes the data required to build the flare; the core agent fetches the flare data from this port.
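+
+As an illustration, here is a minimal Go sketch that reads this payload from the extension port (the exact route is an assumption here; the authoritative copy of this data is the `otel-response.json` file described below):
+
+```
+// Minimal sketch (hypothetical client): fetch the raw flare payload that the
+// extension exposes on its configured port.
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	resp, err := http.Get("http://localhost:7777/") // port from the extension config
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(body))
+}
+```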
+
+## Data collected by flare
+
+### Configurations
+
+The flare will collect both the provided collector config and the enhanced config (enhanced via [converter](../converter/README.md)).
+
+The provided collector config can be found in `otel/otel-flare/customer.cfg` and the enhanced config can be found in `otel/otel-flare/enhanced.cfg`.
+
+### Environment variables
+
+The flare will collect all environment variables, which can be found in `otel/otel-flare/environment.json`.
+
+### Extension data
+
+The flare also adds data collected from extensions. These extensions are added automatically by the [converter component](../converter/README.md). Data is collected from the following extensions:
+- health_check: Found in `otel/otel-flare/health_check`. Contains a JSON snapshot of the latest health check, for example:
+
+```
+{"status":"Server available","upSince":"2024-08-14T14:54:00.575804+02:00","uptime":"28.470434291s"}
+```
+- pprof: Found in `otel/otel-flare/pprof`
+- zpages: Found in `otel/otel-flare/zpages`
+
+### Logs
+
+The flare will collect the otel-agent logs, which can be found in `logs/otel-agent.log`.
+
+### Raw data
+
+The raw response can be found in `otel-response.json`. This corresponds to the data exposed at the datadog extension's port.
+
+## Data collected for inventory
+
+The ddflare extension submits a variety of metadata for Fleet Automation, including the version, command, and configuration. You can find more information about the Inventory Agent Payload in [comp/metadata/inventoryotel/README.md](../../metadata/inventoryotel/README.md).
diff --git a/comp/otelcol/ddflareextension/def/types.go b/comp/otelcol/ddflareextension/def/types.go
index f25901754fe85..f7002d816225d 100644
--- a/comp/otelcol/ddflareextension/def/types.go
+++ b/comp/otelcol/ddflareextension/def/types.go
@@ -24,8 +24,7 @@ type ConfigResponse struct {
// OTelFlareSource is the response struct for flare debug sources
type OTelFlareSource struct {
- URLs []string `json:"url"`
- Crawl bool `json:"crawl"`
+ URLs []string `json:"url"`
}
// DebugSourceResponse is the response struct for a map of OTelFlareSource
diff --git a/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png b/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png
new file mode 100644
index 0000000000000..02894b083267b
Binary files /dev/null and b/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png differ
diff --git a/comp/otelcol/ddflareextension/impl/config.go b/comp/otelcol/ddflareextension/impl/config.go
index 456559ff3eb09..e5c7aa8a66aba 100644
--- a/comp/otelcol/ddflareextension/impl/config.go
+++ b/comp/otelcol/ddflareextension/impl/config.go
@@ -16,15 +16,14 @@ import (
"go.opentelemetry.io/collector/confmap"
)
-type extractDebugEndpoint func(conf *confmap.Conf) (string, bool, error)
+type extractDebugEndpoint func(conf *confmap.Conf) (string, error)
var (
errHTTPEndpointRequired = errors.New("http endpoint required")
supportedDebugExtensions = map[string]extractDebugEndpoint{
"health_check": healthExtractEndpoint,
- // disabled zpages from flare until solution to display data.
- // "zpages": zPagesExtractEndpoint,
- "pprof": pprofExtractEndpoint,
+ "zpages": zPagesExtractEndpoint,
+ "pprof": pprofExtractEndpoint,
}
)
@@ -58,20 +57,19 @@ func (c *Config) Unmarshal(conf *confmap.Conf) error {
return nil
}
-// todo: uncomment once zpages data is re-added to flare
-// func zPagesExtractEndpoint(c *confmap.Conf) (string, bool, error) {
-// endpoint, err := regularStringEndpointExtractor(c)
-// return endpoint, true, err
-// }
+func zPagesExtractEndpoint(c *confmap.Conf) (string, error) {
+ endpoint, err := regularStringEndpointExtractor(c)
+ return endpoint, err
+}
-func pprofExtractEndpoint(c *confmap.Conf) (string, bool, error) {
+func pprofExtractEndpoint(c *confmap.Conf) (string, error) {
endpoint, err := regularStringEndpointExtractor(c)
- return endpoint, false, err
+ return endpoint, err
}
-func healthExtractEndpoint(c *confmap.Conf) (string, bool, error) {
+func healthExtractEndpoint(c *confmap.Conf) (string, error) {
endpoint, err := regularStringEndpointExtractor(c)
- return endpoint, false, err
+ return endpoint, err
}
func regularStringEndpointExtractor(c *confmap.Conf) (string, error) {
diff --git a/comp/otelcol/ddflareextension/impl/config_test.go b/comp/otelcol/ddflareextension/impl/config_test.go
index 1c1aa99056858..b3c15eab8a784 100644
--- a/comp/otelcol/ddflareextension/impl/config_test.go
+++ b/comp/otelcol/ddflareextension/impl/config_test.go
@@ -73,21 +73,15 @@ func TestExtractors(t *testing.T) {
myConfMap := confmap.NewFromStringMap(m)
- for extension, extractor := range supportedDebugExtensions {
- expectedCrawl := false
- if extension == "zpages" {
- expectedCrawl = true
- }
-
- uri, crawl, err := extractor(myConfMap)
+ for _, extractor := range supportedDebugExtensions {
+ uri, err := extractor(myConfMap)
assert.NoError(t, err)
- assert.Equal(t, expectedCrawl, crawl)
assert.Equal(t, endpoint, uri)
}
myConfMap = confmap.New()
for _, extractor := range supportedDebugExtensions {
- _, _, err := extractor(myConfMap)
+ _, err := extractor(myConfMap)
assert.Error(t, err)
}
diff --git a/comp/otelcol/ddflareextension/impl/extension.go b/comp/otelcol/ddflareextension/impl/extension.go
index e1cc0b2b9e743..fca2149b9544d 100644
--- a/comp/otelcol/ddflareextension/impl/extension.go
+++ b/comp/otelcol/ddflareextension/impl/extension.go
@@ -87,23 +87,36 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error {
continue
}
- uri, crawl, err := extractor(exconf)
+ uri, err := extractor(exconf)
var uris []string
- if extension.Type().String() == "pprof" {
- uris = []string{uri + "/debug/pprof/heap", uri + "/debug/pprof/allocs", uri + "/debug/pprof/profile"}
- } else {
+ switch extension.Type().String() {
+ case "pprof":
+ uris = []string{
+ uri + "/debug/pprof/heap",
+ uri + "/debug/pprof/allocs",
+ uri + "/debug/pprof/profile",
+ }
+ case "zpages":
+ uris = []string{
+ uri + "/debug/servicez",
+ uri + "/debug/pipelinez",
+ uri + "/debug/extensionz",
+ uri + "/debug/featurez",
+ uri + "/debug/tracez",
+ }
+ default:
uris = []string{uri}
}
if err != nil {
ext.telemetry.Logger.Info("Unavailable debug extension for", zap.String("extension", extension.String()))
- } else {
- ext.telemetry.Logger.Info("Found debug extension at", zap.String("uri", uri))
- ext.debug.Sources[extension.String()] = extensionDef.OTelFlareSource{
- URLs: uris,
- Crawl: crawl,
- }
+ continue
+ }
+
+ ext.telemetry.Logger.Info("Found debug extension at", zap.String("uri", uri))
+ ext.debug.Sources[extension.String()] = extensionDef.OTelFlareSource{
+ URLs: uris,
}
}
diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go
index e3978764031e5..96787d7a53196 100644
--- a/comp/otelcol/otlp/collector.go
+++ b/comp/otelcol/otlp/collector.go
@@ -12,9 +12,10 @@ import (
"fmt"
"go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/collector/exporter"
- "go.opentelemetry.io/collector/exporter/loggingexporter"
+ "go.opentelemetry.io/collector/exporter/debugexporter"
"go.opentelemetry.io/collector/exporter/otlpexporter"
"go.opentelemetry.io/collector/extension"
"go.opentelemetry.io/collector/otelcol"
@@ -106,7 +107,7 @@ func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message
exporterFactories := []exporter.Factory{
otlpexporter.NewFactory(),
serializerexporter.NewFactory(s, &tagEnricher{cardinality: types.LowCardinality}, hostname.Get, nil, nil),
- loggingexporter.NewFactory(),
+ debugexporter.NewFactory(),
}
if logsAgentChannel != nil {
@@ -163,30 +164,20 @@ type PipelineConfig struct {
Metrics map[string]interface{}
}
-// valid values for debug log level.
-var debugLogLevelMap = map[string]struct{}{
- "disabled": {},
- "debug": {},
- "info": {},
- "warn": {},
- "error": {},
-}
-
// shouldSetLoggingSection returns whether debug logging is enabled.
-// If an invalid loglevel value is set, it assumes debug logging is disabled.
-// If the special 'disabled' value is set, it returns false.
-// Otherwise it returns true and lets the Collector handle the rest.
+// Debug logging is enabled when verbosity is left unset or is set to any valid value other than "none".
func (p *PipelineConfig) shouldSetLoggingSection() bool {
- // Legacy behavior: keep it so that we support `loglevel: disabled`.
- if v, ok := p.Debug["loglevel"]; ok {
- if s, ok := v.(string); ok {
- _, ok := debugLogLevelMap[s]
- return ok && s != "disabled"
- }
+ v, ok := p.Debug["verbosity"]
+ if !ok {
+ return true
}
-
- // If the legacy behavior does not apply, we always want to set the logging section.
- return true
+ s, ok := v.(string)
+ if !ok {
+ return false
+ }
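+	// Accept only strings that parse as a collector telemetry level; the special value "none" disables the logging section.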
+ var level configtelemetry.Level
+ err := level.UnmarshalText([]byte(s))
+ return err == nil && s != "none"
}
// Pipeline is an OTLP pipeline.
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
index 8857c8dcff0c4..5f48e6da16c46 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
@@ -185,7 +185,7 @@ require (
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
- github.com/DataDog/go-sqllexer v0.0.13 // indirect
+ github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
index e60a093490cb1..cdf2e4f5f136d 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
@@ -8,8 +8,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM=
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ=
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go
index 7bf6c54154f23..04f8eaeae152b 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go
@@ -94,6 +94,9 @@ func (e *Exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) (err error) {
// ingestionTs is an internal field used for latency tracking on the status page, not the actual log timestamp.
ingestionTs := time.Now().UnixNano()
message := message.NewMessage(content, origin, status, ingestionTs)
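+	// Propagate the hostname carried on the mapped log (e.g. derived from the host.name resource attribute) to the message.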
+ if ddLog.Hostname != nil {
+ message.Hostname = *ddLog.Hostname
+ }
e.logsAgentChannel <- message
}
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
index 3944c650f7828..839b5a294c1d2 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
@@ -73,6 +73,7 @@ func TestLogsExporter(t *testing.T) {
ldd := lrr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0)
ldd.Attributes().PutStr("message", "hello")
ldd.Attributes().PutStr("datadog.log.source", "custom_source")
+ ldd.Attributes().PutStr("host.name", "test-host")
return lrr
}(),
otelSource: otelSource,
@@ -95,6 +96,8 @@ func TestLogsExporter(t *testing.T) {
"otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()),
"otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()),
"resource-attr": "resource-attr-val-1",
+ "host.name": "test-host",
+ "hostname": "test-host",
},
},
expectedTags: [][]string{{"otel_source:datadog_agent"}},
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md
new file mode 100644
index 0000000000000..3c2859b510b64
--- /dev/null
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md
@@ -0,0 +1,196 @@
+# Infra Attributes Processor
+
+The infra attributes processor extracts [Kubernetes tags](https://docs.datadoghq.com/containers/kubernetes/tag/?tab=datadogoperator#out-of-the-box-tags) based on labels or annotations and assigns these tags as resource attributes on traces, metrics, and logs.
+
+When telemetry is exported from the otel-agent, these infra attributes will be converted into Datadog tags and used as metadata in [Container Monitoring](https://docs.datadoghq.com/containers/).
+
+## Configuration
+
+The infra attributes processor is added automatically by the [converter component](../../../../converter/README.md). If you opted out of the converter, or if you want to change the defaults, you can configure the processor as follows:
+```
+processors:
+ infraattributes:
+ cardinality: 0
+```
+
+The infra attributes processor must also be included in the pipelines to take effect:
+```
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [infraattributes]
+ exporters: [datadog/connector, datadog]
+ metrics:
+ receivers: [otlp, datadog/connector]
+ processors: [infraattributes]
+ exporters: [datadog]
+ logs:
+ receivers: [otlp]
+ processors: [infraattributes]
+ exporters: [datadog]
+```
+
+### Cardinality
+The cardinality option sets the [TagCardinality](../../../../../../comp/core/tagger/README.md#tagcardinality) in the Datadog Agent tagger component. Possible values for this option include:
+* `cardinality: 0` - **LowCardinality**: in the host count order of magnitude *(default)*
+* `cardinality: 1` - **OrchestratorCardinality**: tags that change value for each pod or task
+* `cardinality: 2` - **HighCardinality**: typically tags that change value for each web request, user agent, container, etc.
+
+## Expected Attributes
+
+The infra attributes processor [looks up the following resource attributes](https://github.com/DataDog/datadog-agent/blob/7d51e9e0dc9fb52aab468b372a5724eece97538c/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go#L42-L77) in order to extract Kubernetes Tags. These resource attributes can be set in your SDK or in your otel-agent collector configuration:
+
+| *[Entity](../../../../../../comp/core/tagger/README.md#entity-ids)* | *Resource Attributes* |
+|----------------------------------------------------------------------|---------------------------------------------|
+| workloadmeta.KindContainer | `container.id` |
+| workloadmeta.KindContainerImageMetadata | `container.image.id` |
+| workloadmeta.KindECSTask | `aws.ecs.task.arn` |
+| workloadmeta.KindKubernetesDeployment | `k8s.deployment.name`, `k8s.namespace.name` |
+| workloadmeta.KindKubernetesMetadata | `k8s.namespace.name`, `k8s.node.name` |
+| workloadmeta.KindKubernetesPod | `k8s.pod.uid` |
+| workloadmeta.KindProcess | `process.pid` |
+
+### SDK Configuration
+
+The expected resource attributes can be set by using the `OTEL_RESOURCE_ATTRIBUTES` environment variable. For example, it can be set in your Kubernetes deployment YAML:
+```
+env:
+ ...
+ - name: OTEL_SERVICE_NAME
+ value: {{ include "calendar.fullname" . }}
+ - name: OTEL_K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: OTEL_K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: OTEL_K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: OTEL_K8S_POD_ID
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.uid
+ - name: OTEL_RESOURCE_ATTRIBUTES
+ value: >-
+ service.name=$(OTEL_SERVICE_NAME),
+ k8s.namespace.name=$(OTEL_K8S_NAMESPACE),
+ k8s.node.name=$(OTEL_K8S_NODE_NAME),
+ k8s.pod.name=$(OTEL_K8S_POD_NAME),
+ k8s.pod.uid=$(OTEL_K8S_POD_ID),
+ k8s.container.name={{ .Chart.Name }},
+ host.name=$(OTEL_K8S_NODE_NAME),
+ deployment.environment=$(OTEL_K8S_NAMESPACE)
+```
+
+If you are using OTel SDK auto-instrumentation, `container.id` and `process.pid` will be automatically set by your SDK.
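+
+For reference, here is a minimal Go sketch that attaches a few of the expected resource attributes in code rather than through `OTEL_RESOURCE_ATTRIBUTES` (the attribute values below are placeholders):
+
+```
+// Minimal sketch: attach resource attributes that the infra attributes
+// processor looks up when building Kubernetes tags.
+package main
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func newTracerProvider() *sdktrace.TracerProvider {
+	res := resource.NewWithAttributes(
+		"", // no schema URL needed for this plain attribute set
+		attribute.String("k8s.pod.uid", "<pod-uid>"),
+		attribute.String("k8s.namespace.name", "default"),
+		attribute.String("container.id", "<container-id>"),
+	)
+	return sdktrace.NewTracerProvider(sdktrace.WithResource(res))
+}
+```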
+
+### Collector Configuration
+
+The expected resource attributes can be set by configuring the [Kubernetes attributes processor and resource detection processor](https://docs.datadoghq.com/opentelemetry/collector_exporter/hostname_tagging/?tab=kubernetesdaemonset). This is demonstrated in the [k8s-values.yaml](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/k8s-values.yaml) example:
+```
+mode: daemonset
+presets:
+ kubernetesAttributes:
+ enabled: true
+extraEnvs:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: OTEL_RESOURCE_ATTRIBUTES
+ value: "k8s.pod.ip=$(POD_IP)"
+config:
+ processors:
+ k8sattributes:
+ passthrough: false
+ auth_type: "serviceAccount"
+ pod_association:
+ - sources:
+ - from: resource_attribute
+ name: k8s.pod.ip
+ extract:
+ metadata:
+ - k8s.pod.name
+ - k8s.pod.uid
+ - k8s.deployment.name
+ - k8s.node.name
+ - k8s.namespace.name
+ - k8s.pod.start_time
+ - k8s.replicaset.name
+ - k8s.replicaset.uid
+ - k8s.daemonset.name
+ - k8s.daemonset.uid
+ - k8s.job.name
+ - k8s.job.uid
+ - k8s.cronjob.name
+ - k8s.statefulset.name
+ - k8s.statefulset.uid
+ - container.image.name
+ - container.image.tag
+ - container.id
+ - k8s.container.name
+ labels:
+ - tag_name: kube_app_name
+ key: app.kubernetes.io/name
+ from: pod
+ - tag_name: kube_app_instance
+ key: app.kubernetes.io/instance
+ from: pod
+ - tag_name: kube_app_version
+ key: app.kubernetes.io/version
+ from: pod
+ - tag_name: kube_app_component
+ key: app.kubernetes.io/component
+ from: pod
+ - tag_name: kube_app_part_of
+ key: app.kubernetes.io/part-of
+ from: pod
+ - tag_name: kube_app_managed_by
+ key: app.kubernetes.io/managed-by
+ from: pod
+ resourcedetection:
+ detectors: [env, eks, ec2, system]
+ timeout: 2s
+ override: false
+ batch:
+ send_batch_max_size: 1000
+ send_batch_size: 100
+ timeout: 10s
+ exporters:
+ datadog:
+ api:
+ site: ${env:DD_SITE}
+ key: ${env:DD_API_KEY}
+ traces:
+ trace_buffer: 500
+ service:
+ pipelines:
+ metrics:
+ receivers: [otlp]
+ processors: [batch, resourcedetection, k8sattributes]
+ exporters: [datadog]
+ traces:
+ receivers: [otlp]
+ processors: [batch, resourcedetection, k8sattributes]
+ exporters: [datadog]
+ logs:
+ receivers: [otlp]
+ processors: [batch, resourcedetection, k8sattributes]
+ exporters: [datadog]
+```
+
+## List of Kubernetes Tags
+
+For the full list of Kubernetes Tags added by the infra attributes processor, see [comp/core/tagger/tags/tags.go](../../../../../../comp/core/tagger/tags/tags.go).
diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod
index 50b1bae9469db..b21dae11b5ade 100644
--- a/comp/otelcol/otlp/components/statsprocessor/go.mod
+++ b/comp/otelcol/otlp/components/statsprocessor/go.mod
@@ -38,7 +38,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect
- github.com/DataDog/go-sqllexer v0.0.13 // indirect
+ github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum
index dcf72c47c645c..a928af42bb225 100644
--- a/comp/otelcol/otlp/components/statsprocessor/go.sum
+++ b/comp/otelcol/otlp/components/statsprocessor/go.sum
@@ -1,7 +1,7 @@
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE=
diff --git a/comp/otelcol/otlp/config_test.go b/comp/otelcol/otlp/config_test.go
index 593c27e495cfc..faa2223c8c661 100644
--- a/comp/otelcol/otlp/config_test.go
+++ b/comp/otelcol/otlp/config_test.go
@@ -304,7 +304,7 @@ func TestFromEnvironmentVariables(t *testing.T) {
name: "only gRPC, disabled logging",
env: map[string]string{
"DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT": "0.0.0.0:9999",
- "DD_OTLP_CONFIG_DEBUG_LOGLEVEL": "disabled",
+ "DD_OTLP_CONFIG_DEBUG_VERBOSITY": "none",
},
cfg: PipelineConfig{
OTLPReceiverConfig: map[string]interface{}{
@@ -325,7 +325,7 @@ func TestFromEnvironmentVariables(t *testing.T) {
"apm_stats_receiver_addr": "http://localhost:8126/v0.6/stats",
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
},
@@ -475,7 +475,7 @@ func TestFromAgentConfigMetrics(t *testing.T) {
"tags": "tag1:value1,tag2:value2",
},
Debug: map[string]interface{}{
- "loglevel": "debug",
+ "verbosity": "detailed",
},
},
},
@@ -520,7 +520,7 @@ func TestFromAgentConfigDebug(t *testing.T) {
},
},
{
- path: "debug/loglevel_debug.yaml",
+ path: "debug/verbosity_detailed.yaml",
shouldSet: true,
cfg: PipelineConfig{
OTLPReceiverConfig: map[string]interface{}{},
@@ -529,7 +529,7 @@ func TestFromAgentConfigDebug(t *testing.T) {
MetricsEnabled: true,
TracesEnabled: true,
LogsEnabled: false,
- Debug: map[string]interface{}{"loglevel": "debug"},
+ Debug: map[string]interface{}{"verbosity": "detailed"},
Metrics: map[string]interface{}{
"enabled": true,
"tag_cardinality": "low",
@@ -538,7 +538,7 @@ func TestFromAgentConfigDebug(t *testing.T) {
},
},
{
- path: "debug/loglevel_disabled.yaml",
+ path: "debug/verbosity_none.yaml",
shouldSet: false,
cfg: PipelineConfig{
OTLPReceiverConfig: map[string]interface{}{},
@@ -547,7 +547,7 @@ func TestFromAgentConfigDebug(t *testing.T) {
MetricsEnabled: true,
TracesEnabled: true,
LogsEnabled: false,
- Debug: map[string]interface{}{"loglevel": "disabled"},
+ Debug: map[string]interface{}{"verbosity": "none"},
Metrics: map[string]interface{}{
"enabled": true,
"tag_cardinality": "low",
diff --git a/comp/otelcol/otlp/map_provider.go b/comp/otelcol/otlp/map_provider.go
index ed443ff15ca56..0c65bcc0fa99d 100644
--- a/comp/otelcol/otlp/map_provider.go
+++ b/comp/otelcol/otlp/map_provider.go
@@ -94,31 +94,31 @@ func buildMap(cfg PipelineConfig) (*confmap.Conf, error) {
if cfg.shouldSetLoggingSection() {
m := map[string]interface{}{
"exporters": map[string]interface{}{
- "logging": cfg.Debug,
+ "debug": cfg.Debug,
},
}
if cfg.MetricsEnabled {
key := buildKey("service", "pipelines", "metrics", "exporters")
if v, ok := retMap.Get(key).([]interface{}); ok {
- m[key] = append(v, "logging")
+ m[key] = append(v, "debug")
} else {
- m[key] = []interface{}{"logging"}
+ m[key] = []interface{}{"debug"}
}
}
if cfg.TracesEnabled {
key := buildKey("service", "pipelines", "traces", "exporters")
if v, ok := retMap.Get(key).([]interface{}); ok {
- m[key] = append(v, "logging")
+ m[key] = append(v, "debug")
} else {
- m[key] = []interface{}{"logging"}
+ m[key] = []interface{}{"debug"}
}
}
if cfg.LogsEnabled {
key := buildKey("service", "pipelines", "logs", "exporters")
if v, ok := retMap.Get(key).([]interface{}); ok {
- m[key] = append(v, "logging")
+ m[key] = append(v, "debug")
} else {
- m[key] = []interface{}{"logging"}
+ m[key] = []interface{}{"debug"}
}
}
errs = append(errs, retMap.Merge(confmap.NewFromStringMap(m)))
diff --git a/comp/otelcol/otlp/map_provider_not_serverless_test.go b/comp/otelcol/otlp/map_provider_not_serverless_test.go
index 9b9624e405f06..4ee1a08a38efe 100644
--- a/comp/otelcol/otlp/map_provider_not_serverless_test.go
+++ b/comp/otelcol/otlp/map_provider_not_serverless_test.go
@@ -37,7 +37,7 @@ func TestNewMap(t *testing.T) {
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -91,7 +91,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -150,7 +150,7 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only HTTP, metrics and traces, invalid loglevel(ignored)",
+ name: "only HTTP, metrics and traces, invalid verbosity (ignored)",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -167,7 +167,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "foo",
+ "verbosity": "foo",
},
},
ocfg: map[string]interface{}{
@@ -232,7 +232,7 @@ func TestNewMap(t *testing.T) {
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -288,7 +288,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -333,13 +333,13 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only gRPC, only Traces, logging info",
+ name: "only gRPC, only Traces, logging with normal verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0),
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "info",
+ "verbosity": "normal",
},
},
ocfg: map[string]interface{}{
@@ -363,8 +363,8 @@ func TestNewMap(t *testing.T) {
"enabled": false,
},
},
- "logging": map[string]interface{}{
- "loglevel": "info",
+ "debug": map[string]interface{}{
+ "verbosity": "normal",
},
},
"service": map[string]interface{}{
@@ -372,14 +372,14 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
},
},
},
},
{
- name: "only HTTP, only metrics, logging debug",
+ name: "only HTTP, only metrics, logging with detailed verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -394,7 +394,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "debug",
+ "verbosity": "detailed",
},
},
ocfg: map[string]interface{}{
@@ -424,8 +424,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "debug",
+ "debug": map[string]interface{}{
+ "verbosity": "detailed",
},
},
"service": map[string]interface{}{
@@ -434,14 +434,14 @@ func TestNewMap(t *testing.T) {
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"batch"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
},
},
},
},
{
- name: "only HTTP, metrics and traces, logging warn",
+ name: "only HTTP, metrics and traces, logging with basic verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -457,7 +457,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "warn",
+ "verbosity": "basic",
},
},
ocfg: map[string]interface{}{
@@ -497,8 +497,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "warn",
+ "debug": map[string]interface{}{
+ "verbosity": "basic",
},
},
"service": map[string]interface{}{
@@ -506,12 +506,12 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"batch"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
},
},
@@ -525,7 +525,7 @@ func TestNewMap(t *testing.T) {
TracesEnabled: true,
LogsEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -592,7 +592,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -658,7 +658,7 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only HTTP; metrics, logs and traces; invalid loglevel(ignored)",
+ name: "only HTTP; metrics, logs and traces; invalid verbosity (ignored)",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -676,7 +676,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "foo",
+ "verbosity": "foo",
},
},
ocfg: map[string]interface{}{
@@ -749,7 +749,7 @@ func TestNewMap(t *testing.T) {
TracesEnabled: true,
LogsEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -818,7 +818,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -870,14 +870,14 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only gRPC, traces and logs, logging info",
+ name: "only gRPC, traces and logs, logging with normal verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0),
TracePort: 5003,
TracesEnabled: true,
LogsEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "info",
+ "verbosity": "normal",
},
},
ocfg: map[string]interface{}{
@@ -907,8 +907,8 @@ func TestNewMap(t *testing.T) {
"enabled": false,
},
},
- "logging": map[string]interface{}{
- "loglevel": "info",
+ "debug": map[string]interface{}{
+ "verbosity": "normal",
},
"logsagent": interface{}(nil),
},
@@ -917,19 +917,19 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
"logs": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"infraattributes", "batch"},
- "exporters": []interface{}{"logsagent", "logging"},
+ "exporters": []interface{}{"logsagent", "debug"},
},
},
},
},
},
{
- name: "only HTTP, metrics and logs, logging debug",
+ name: "only HTTP, metrics and logs, logging with detailed verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -945,7 +945,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "debug",
+ "verbosity": "detailed",
},
},
ocfg: map[string]interface{}{
@@ -976,8 +976,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "debug",
+ "debug": map[string]interface{}{
+ "verbosity": "detailed",
},
"logsagent": interface{}(nil),
},
@@ -987,19 +987,19 @@ func TestNewMap(t *testing.T) {
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"batch"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
"logs": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"infraattributes", "batch"},
- "exporters": []interface{}{"logsagent", "logging"},
+ "exporters": []interface{}{"logsagent", "debug"},
},
},
},
},
},
{
- name: "only HTTP; metrics, traces, and logs; logging warn",
+ name: "only HTTP; metrics, traces, and logs; logging with basic verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -1016,7 +1016,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "warn",
+ "verbosity": "basic",
},
},
ocfg: map[string]interface{}{
@@ -1057,8 +1057,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "warn",
+ "debug": map[string]interface{}{
+ "verbosity": "basic",
},
"logsagent": interface{}(nil),
},
@@ -1067,17 +1067,17 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"batch"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
"logs": map[string]interface{}{
"receivers": []interface{}{"otlp"},
"processors": []interface{}{"infraattributes", "batch"},
- "exporters": []interface{}{"logsagent", "logging"},
+ "exporters": []interface{}{"logsagent", "debug"},
},
},
},
diff --git a/comp/otelcol/otlp/map_provider_serverless_test.go b/comp/otelcol/otlp/map_provider_serverless_test.go
index 7e1721cd33883..99f58c78a1660 100644
--- a/comp/otelcol/otlp/map_provider_serverless_test.go
+++ b/comp/otelcol/otlp/map_provider_serverless_test.go
@@ -31,7 +31,7 @@ func TestNewMap(t *testing.T) {
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -85,7 +85,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -138,7 +138,7 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only HTTP, metrics and traces, invalid loglevel(ignored)",
+ name: "only HTTP, metrics and traces, invalid verbosity (ignored)",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -155,7 +155,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "foo",
+ "verbosity": "foo",
},
},
ocfg: map[string]interface{}{
@@ -214,7 +214,7 @@ func TestNewMap(t *testing.T) {
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -270,7 +270,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "disabled",
+ "verbosity": "none",
},
},
ocfg: map[string]interface{}{
@@ -309,13 +309,13 @@ func TestNewMap(t *testing.T) {
},
},
{
- name: "only gRPC, only Traces, logging info",
+ name: "only gRPC, only Traces, logging with normal verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0),
TracePort: 5003,
TracesEnabled: true,
Debug: map[string]interface{}{
- "loglevel": "info",
+ "verbosity": "normal",
},
},
ocfg: map[string]interface{}{
@@ -339,8 +339,8 @@ func TestNewMap(t *testing.T) {
"enabled": false,
},
},
- "logging": map[string]interface{}{
- "loglevel": "info",
+ "debug": map[string]interface{}{
+ "verbosity": "normal",
},
},
"service": map[string]interface{}{
@@ -348,14 +348,14 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
},
},
},
},
{
- name: "only HTTP, only metrics, logging debug",
+ name: "only HTTP, only metrics, logging with detailed verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -370,7 +370,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "debug",
+ "verbosity": "detailed",
},
},
ocfg: map[string]interface{}{
@@ -395,8 +395,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "debug",
+ "debug": map[string]interface{}{
+ "verbosity": "detailed",
},
},
"service": map[string]interface{}{
@@ -404,14 +404,14 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
},
},
},
},
{
- name: "only HTTP, metrics and traces, logging warn",
+ name: "only HTTP, metrics and traces, logging with basic verbosity",
pcfg: PipelineConfig{
OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234),
TracePort: 5003,
@@ -427,7 +427,7 @@ func TestNewMap(t *testing.T) {
},
},
Debug: map[string]interface{}{
- "loglevel": "warn",
+ "verbosity": "basic",
},
},
ocfg: map[string]interface{}{
@@ -462,8 +462,8 @@ func TestNewMap(t *testing.T) {
},
},
},
- "logging": map[string]interface{}{
- "loglevel": "warn",
+ "debug": map[string]interface{}{
+ "verbosity": "basic",
},
},
"service": map[string]interface{}{
@@ -471,11 +471,11 @@ func TestNewMap(t *testing.T) {
"pipelines": map[string]interface{}{
"traces": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"otlp", "logging"},
+ "exporters": []interface{}{"otlp", "debug"},
},
"metrics": map[string]interface{}{
"receivers": []interface{}{"otlp"},
- "exporters": []interface{}{"serializer", "logging"},
+ "exporters": []interface{}{"serializer", "debug"},
},
},
},
diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml b/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml
deleted file mode 100644
index 92576a13e2c0a..0000000000000
--- a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-otlp_config:
- debug:
- loglevel: disabled
diff --git a/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml
new file mode 100644
index 0000000000000..ff66512c59706
--- /dev/null
+++ b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml
@@ -0,0 +1,3 @@
+otlp_config:
+ debug:
+ verbosity: detailed
diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml
similarity index 52%
rename from comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml
rename to comp/otelcol/otlp/testdata/debug/verbosity_none.yaml
index 48df64859c790..bd281f2225170 100644
--- a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml
+++ b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml
@@ -1,3 +1,3 @@
otlp_config:
debug:
- loglevel: debug
+ verbosity: none
diff --git a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml
index 9982e9731a587..3d7b7849259ce 100644
--- a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml
+++ b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml
@@ -17,4 +17,4 @@ otlp_config:
send_count_sum_metrics: true
send_aggregation_metrics: true
debug:
- loglevel: debug
+ verbosity: detailed
diff --git a/comp/process/forwarders/forwardersimpl/forwarders.go b/comp/process/forwarders/forwardersimpl/forwarders.go
index 2e81a8b290d8f..97c22f21f1480 100644
--- a/comp/process/forwarders/forwardersimpl/forwarders.go
+++ b/comp/process/forwarders/forwardersimpl/forwarders.go
@@ -73,7 +73,7 @@ func newForwarders(deps dependencies) (forwarders.Component, error) {
}
func createForwarder(deps dependencies, options *defaultforwarder.Options) defaultforwarder.Component {
- return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options, false).Comp
+ return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options).Comp
}
func createParams(config config.Component, log log.Component, queueBytes int, endpoints []apicfg.Endpoint) *defaultforwarder.Options {
diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go
index 693c7d42e1971..bbe779df219cd 100644
--- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go
+++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go
@@ -269,8 +269,8 @@ func (rc rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig,
// - we want to change (once again) the log level through RC
// - we want to fall back to the log level we had saved as fallback (in that case mergedConfig.LogLevel == "")
if len(mergedConfig.LogLevel) == 0 {
- pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level"))
config.Datadog().UnsetForSource("log_level", model.SourceRC)
+ pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level"))
} else {
newLevel := mergedConfig.LogLevel
pkglog.Infof("Changing log level to '%s' through remote config", newLevel)
diff --git a/comp/snmptraps/server/serverimpl/server.go b/comp/snmptraps/server/serverimpl/server.go
index 269d771b2ee45..6575452402ef4 100644
--- a/comp/snmptraps/server/serverimpl/server.go
+++ b/comp/snmptraps/server/serverimpl/server.go
@@ -29,6 +29,7 @@ import (
"github.com/DataDog/datadog-agent/comp/snmptraps/status"
"github.com/DataDog/datadog-agent/comp/snmptraps/status/statusimpl"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
)
// Module defines the fx options for this component.
@@ -95,7 +96,7 @@ func newServer(lc fx.Lifecycle, deps dependencies) provides {
// careful never to double-instantiate anything. Do not use this solution
// elsewhere if possible.
app := fx.New(
- fxutil.FxLoggingOption(),
+ logging.FxLoggingOption(),
fx.Supply(injections{
Conf: deps.Conf,
HNService: deps.HNService,
diff --git a/go.mod b/go.mod
index ded7d7e482a48..a024f6d9ea5a8 100644
--- a/go.mod
+++ b/go.mod
@@ -37,6 +37,7 @@ replace (
github.com/DataDog/datadog-agent/comp/core/secrets => ./comp/core/secrets
github.com/DataDog/datadog-agent/comp/core/status => ./comp/core/status
github.com/DataDog/datadog-agent/comp/core/status/statusimpl => ./comp/core/status/statusimpl
+ github.com/DataDog/datadog-agent/comp/core/tagger/types => ./comp/core/tagger/types
github.com/DataDog/datadog-agent/comp/core/tagger/utils => ./comp/core/tagger/utils
github.com/DataDog/datadog-agent/comp/core/telemetry => ./comp/core/telemetry/
github.com/DataDog/datadog-agent/comp/def => ./comp/def/
@@ -129,6 +130,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/statstracker => ./pkg/util/statstracker
github.com/DataDog/datadog-agent/pkg/util/system => ./pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ./pkg/util/system/socket/
+ github.com/DataDog/datadog-agent/pkg/util/tagger => ./pkg/util/tagger
github.com/DataDog/datadog-agent/pkg/util/testutil => ./pkg/util/testutil
github.com/DataDog/datadog-agent/pkg/util/uuid => ./pkg/util/uuid
github.com/DataDog/datadog-agent/pkg/util/winutil => ./pkg/util/winutil/
@@ -147,9 +149,9 @@ require (
github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/log v0.56.2
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/DataDog/datadog-operator v1.8.0-rc.1
github.com/DataDog/ebpf-manager v0.7.1
@@ -267,7 +269,7 @@ require (
github.com/streadway/amqp v1.1.0
github.com/stretchr/testify v1.9.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
- github.com/tinylib/msgp v1.2.0
+ github.com/tinylib/msgp v1.2.1
github.com/twmb/murmur3 v1.1.8
github.com/uptrace/bun v1.2.1
github.com/uptrace/bun/dialect/pgdialect v1.2.1
@@ -285,7 +287,7 @@ require (
go.opentelemetry.io/collector/component v0.104.0
go.opentelemetry.io/collector/confmap v0.104.0
go.opentelemetry.io/collector/exporter v0.104.0
- go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0
+ go.opentelemetry.io/collector/exporter/debugexporter v0.104.0
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
go.opentelemetry.io/collector/pdata v1.11.0
go.opentelemetry.io/collector/processor/batchprocessor v0.104.0
@@ -299,17 +301,17 @@ require (
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d
- golang.org/x/arch v0.9.0
+ golang.org/x/arch v0.10.0
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
- golang.org/x/net v0.28.0
+ golang.org/x/net v0.29.0
golang.org/x/sync v0.8.0
- golang.org/x/sys v0.24.0
- golang.org/x/text v0.17.0
+ golang.org/x/sys v0.25.0
+ golang.org/x/text v0.18.0
golang.org/x/time v0.6.0
- golang.org/x/tools v0.24.0
+ golang.org/x/tools v0.25.0
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9
google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect
- google.golang.org/grpc v1.66.0
+ google.golang.org/grpc v1.66.1
google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a
google.golang.org/protobuf v1.34.2
gopkg.in/DataDog/dd-trace-go.v1 v1.67.0
@@ -566,10 +568,10 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
- golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/mod v0.20.0
+ golang.org/x/crypto v0.27.0 // indirect
+ golang.org/x/mod v0.21.0
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/term v0.23.0 // indirect
+ golang.org/x/term v0.24.0 // indirect
gonum.org/v1/gonum v0.15.0 // indirect
google.golang.org/api v0.185.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
@@ -594,19 +596,21 @@ require (
)
require (
- github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.0.0-00010101000000-000000000000
+ github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000
github.com/containerd/containerd/api v1.7.19
github.com/containerd/errdefs v0.1.0
github.com/distribution/reference v0.6.0
+ github.com/jellydator/ttlcache/v3 v3.3.0
github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649
github.com/lorenzosaino/go-sysctl v0.3.1
+ go.opentelemetry.io/collector/config/configtelemetry v0.104.0
)
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
- github.com/DataDog/agent-payload/v5 v5.0.130
+ github.com/DataDog/agent-payload/v5 v5.0.132
github.com/DataDog/datadog-agent/cmd/agent/common/path v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/api/api/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3
@@ -616,9 +620,10 @@ require (
github.com/DataDog/datadog-agent/comp/core/log/impl v0.0.0-00010101000000-000000000000
github.com/DataDog/datadog-agent/comp/core/log/impl-trace v0.0.0-00010101000000-000000000000
github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000
- github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2
github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.2
github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3
@@ -645,12 +650,12 @@ require (
github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2
+ github.com/DataDog/datadog-agent/pkg/config/env v0.56.2
github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel
- github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/config/model v0.56.2
github.com/DataDog/datadog-agent/pkg/config/remote v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2
github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3
@@ -680,25 +685,26 @@ require (
github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2
- github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2
github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2
github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0
- github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2
github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/system v0.56.2
+ github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect
github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3
github.com/DataDog/go-libddwaf/v3 v3.3.0
- github.com/DataDog/go-sqllexer v0.0.13
+ github.com/DataDog/go-sqllexer v0.0.14
github.com/Datadog/dublin-traceroute v0.0.1
github.com/aquasecurity/trivy v0.49.2-0.20240227072422-e1ea02c7b80d
github.com/aws/aws-sdk-go-v2/service/kms v1.34.1
@@ -712,7 +718,6 @@ require (
github.com/elastic/go-seccomp-bpf v1.4.0
github.com/fatih/structtag v1.2.0
github.com/glaslos/ssdeep v0.4.0
- github.com/gocolly/colly/v2 v2.1.0
github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6
github.com/godror/godror v0.37.0
github.com/jackc/pgx/v5 v5.6.0
@@ -766,7 +771,7 @@ require (
github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect
github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect
@@ -774,16 +779,11 @@ require (
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect
github.com/Intevation/gval v1.3.0 // indirect
github.com/Intevation/jsonpath v0.2.1 // indirect
- github.com/PuerkitoBio/goquery v1.8.1 // indirect
github.com/Showmax/go-fqdn v1.0.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
- github.com/andybalholm/cascadia v1.3.2 // indirect
- github.com/antchfx/htmlquery v1.3.0 // indirect
- github.com/antchfx/xmlquery v1.3.1 // indirect
- github.com/antchfx/xpath v1.2.3 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/apache/thrift v0.20.0 // indirect
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 // indirect
@@ -855,7 +855,6 @@ require (
github.com/jaegertracing/jaeger v1.58.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
- github.com/kennygrant/sanitize v1.2.4 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
@@ -937,7 +936,6 @@ require (
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
- github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect
github.com/shirou/gopsutil/v4 v4.24.5 // indirect
@@ -953,7 +951,6 @@ require (
github.com/stormcat24/protodep v0.1.8 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/swaggest/refl v1.3.0 // indirect
- github.com/temoto/robotstxt v1.1.1 // indirect
github.com/tetratelabs/wazero v1.7.0 // indirect
github.com/tidwall/gjson v1.17.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
@@ -970,11 +967,9 @@ require (
go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect
go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect
go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect
go.opentelemetry.io/collector/config/internal v0.104.0 // indirect
go.opentelemetry.io/collector/connector v0.104.0 // indirect
- go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 // indirect
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 // indirect
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 // indirect
go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect
diff --git a/go.sum b/go.sum
index 6ade4f6e537a8..cc9e6112e673b 100644
--- a/go.sum
+++ b/go.sum
@@ -685,8 +685,8 @@ github.com/CycloneDX/cyclonedx-go v0.8.0 h1:FyWVj6x6hoJrui5uRQdYZcSievw3Z32Z88uY
github.com/CycloneDX/cyclonedx-go v0.8.0/go.mod h1:K2bA+324+Og0X84fA8HhN2X066K7Bxz4rpMQ4ZhjtSk=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
-github.com/DataDog/agent-payload/v5 v5.0.130 h1:pVMRVKkUMmw2vOpmP92TO9jrS0om3K0uKteXHcy/6v0=
-github.com/DataDog/agent-payload/v5 v5.0.130/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs=
+github.com/DataDog/agent-payload/v5 v5.0.132 h1:F9wy+iyAgN2QmkEsOlPp3RrQ4vOb4T6k3BXhjSpELS4=
+github.com/DataDog/agent-payload/v5 v5.0.132/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs=
github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0=
github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
github.com/DataDog/aptly v1.5.3 h1:oLsRvjuXSVM4ia0N83dU3KiQeiJ6BaszYbTZOkSfDlw=
@@ -712,8 +712,8 @@ github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302
github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ=
github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4=
github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k=
@@ -793,9 +793,6 @@ github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
-github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM=
@@ -836,24 +833,8 @@ github.com/alicebob/miniredis/v2 v2.31.1/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CAS
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY=
-github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
-github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
-github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/antchfx/htmlquery v1.2.3/go.mod h1:B0ABL+F5irhhMWg54ymEZinzMSi0Kt3I2if0BLYa3V0=
-github.com/antchfx/htmlquery v1.3.0 h1:5I5yNFOVI+egyia5F2s/5Do2nFWxJz41Tr3DyfKD25E=
-github.com/antchfx/htmlquery v1.3.0/go.mod h1:zKPDVTMhfOmcwxheXUsx4rKJy8KEY/PU6eXr/2SebQ8=
-github.com/antchfx/xmlquery v1.2.4/go.mod h1:KQQuESaxSlqugE2ZBcM/qn+ebIpt+d+4Xx7YcSGAIrM=
-github.com/antchfx/xmlquery v1.3.1 h1:nIKWdtnhrXtj0/IRUAAw2I7TfpHUa3zMnHvNmPXFg+w=
-github.com/antchfx/xmlquery v1.3.1/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc=
-github.com/antchfx/xpath v1.1.6/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xpath v1.1.8/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xpath v1.2.3 h1:CCZWOzv5bAqjVv0offZ2LVgVYFbeldKQVuLNbViZdes=
-github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
@@ -1400,9 +1381,6 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54=
github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng=
-github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA=
-github.com/gocolly/colly/v2 v2.1.0 h1:k0DuZkDoCsx51bKpRJNEmcxcp+W5N8ziuwGaSDuFoGs=
-github.com/gocolly/colly/v2 v2.1.0/go.mod h1:I2MuhsLjQ+Ex+IzK3afNS8/1qP3AedHOusRPcRdC5o0=
github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6 h1:u1QKTc+GgWnBO1Mo0CwQ/4DXElFmSvNKRspxAr+AJuY=
github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6/go.mod h1:ifGf7cSYIibtw3UXJy7QlbR8kJE6giDk7vGyCQZv0zo=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -1761,9 +1739,10 @@ github.com/jaegertracing/jaeger v1.58.1 h1:bFtX70yQbBfRbS8TB1JL4/ENr/qR09VJMeC/C
github.com/jaegertracing/jaeger v1.58.1/go.mod h1:2qpJpm9BzpbxNpaillaCA4pvdAIRTJT0ZRxrzMglBlo=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
-github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
@@ -1809,8 +1788,6 @@ github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwS
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
-github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -2355,8 +2332,6 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
-github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ=
github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=
@@ -2460,7 +2435,6 @@ github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM=
github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
@@ -2500,8 +2474,6 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBN
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
github.com/tedsuo/rata v1.0.0 h1:Sf9aZrYy6ElSTncjnGkyC2yuVvz5YJetBIUKJ4CmeKE=
github.com/tedsuo/rata v1.0.0/go.mod h1:X47ELzhOoLbfFIY0Cql9P6yo3Cdwf2CMX3FVZxRzJPc=
-github.com/temoto/robotstxt v1.1.1 h1:Gh8RCs8ouX3hRSxxK7B1mO5RFByQ4CmJZDwgom++JaA=
-github.com/temoto/robotstxt v1.1.1/go.mod h1:+1AmkuG3IYkh1kv0d2qEB9Le88ehNO0zwOr3ujewlOo=
github.com/terminalstatic/go-xsd-validate v0.1.5 h1:RqpJnf6HGE2CB/lZB1A8BYguk8uRtcvYAPLCF15qguo=
github.com/terminalstatic/go-xsd-validate v0.1.5/go.mod h1:18lsvYFofBflqCrvo1umpABZ99+GneNTw2kEEc8UPJw=
github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs=
@@ -2524,8 +2496,8 @@ github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4=
github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E=
github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs=
github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao=
-github.com/tinylib/msgp v1.2.0 h1:0uKB/662twsVBpYUPbokj4sTSKhWFKB7LopO2kWK8lY=
-github.com/tinylib/msgp v1.2.0/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
+github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU=
+github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
@@ -2723,8 +2695,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL
go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ=
go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw=
go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8=
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY=
go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo=
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ=
@@ -2849,8 +2819,8 @@ go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
-golang.org/x/arch v0.9.0 h1:ub9TgUInamJ8mrZIGlBG6/4TqWeMszd4N8lNorbrr6k=
-golang.org/x/arch v0.9.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
+golang.org/x/arch v0.10.0 h1:S3huipmSclq3PJMNe76NGwkBR504WFkQ5dhzWzP8ZW8=
+golang.org/x/arch v0.10.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -2879,8 +2849,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2949,9 +2919,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
-golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -2981,16 +2950,13 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -3009,7 +2975,6 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -3040,8 +3005,8 @@ golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -3226,8 +3191,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -3244,8 +3209,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -3265,8 +3230,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -3358,8 +3323,8 @@ golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
-golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
+golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -3655,8 +3620,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
-google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
-google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM=
+google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw=
google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o=
diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go
index ea8c9642e824d..f971da2048b6c 100644
--- a/pkg/cli/subcommands/check/command.go
+++ b/pkg/cli/subcommands/check/command.go
@@ -217,8 +217,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command {
fx.Provide(func() pidmap.Component { return nil }),
getPlatformModules(),
- jmxloggerimpl.Module(),
- fx.Supply(jmxloggerimpl.NewDisabledParams()),
+ jmxloggerimpl.Module(jmxloggerimpl.NewDisabledParams()),
)
},
}
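
The command.go change above folds `fx.Supply(jmxloggerimpl.NewDisabledParams())` into the module constructor, so the params travel with the module registration. A minimal sketch of that fx pattern, assuming go.uber.org/fx; `Params`, `Logger`, and `Module` here are illustrative stand-ins, not the real jmxlogger API:

```go
package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Params is the configuration the module needs at construction time.
type Params struct{ Disabled bool }

// Logger is the hypothetical component the module provides.
type Logger struct{ disabled bool }

// Module closes over its params, so callers write Module(params) instead of
// pairing Module() with a separate fx.Supply(params).
func Module(params Params) fx.Option {
	return fx.Options(
		fx.Provide(func() *Logger { return &Logger{disabled: params.Disabled} }),
	)
}

func main() {
	// fx.New wires the graph and runs the Invoke immediately.
	fx.New(
		Module(Params{Disabled: true}),
		fx.Invoke(func(l *Logger) { fmt.Println("logger disabled:", l.disabled) }),
	)
}
```

One upside of this shape is that a caller cannot register the module while forgetting to supply its params: the constructor argument is enforced by the compiler.
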
diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go
index 8e8d424e17eef..d05d123a852f2 100644
--- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go
+++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go
@@ -9,6 +9,7 @@ package agentsidecar
import (
"fmt"
+ "slices"
corev1 "k8s.io/api/core/v1"
@@ -33,6 +34,12 @@ const dogstatsdSocket = socketDir + "/dsd.socket"
// webhook to distinguish them easily.
const ddSocketsVolumeName = "ddsockets"
+var volumeNamesInjectedByConfigWebhook = []string{
+ configWebhook.DatadogVolumeName,
+ configWebhook.DogstatsdSocketVolumeName,
+ configWebhook.TraceAgentSocketVolumeName,
+}
+
// providerIsSupported indicates whether the provider is supported by agent sidecar injection
func providerIsSupported(provider string) bool {
switch provider {
@@ -85,10 +92,7 @@ func applyFargateOverrides(pod *corev1.Pod) (bool, error) {
return false, fmt.Errorf("can't apply profile overrides to nil pod")
}
- mutated := false
-
- deleted := deleteConfigWebhookVolumeAndMounts(pod)
- mutated = mutated || deleted
+ mutated := deleteConfigWebhookVolumesAndMounts(pod)
volume, volumeMount := socketsVolume()
injected := common.InjectVolume(pod, volume, volumeMount)
@@ -174,20 +178,19 @@ func socketsVolume() (corev1.Volume, corev1.VolumeMount) {
return volume, volumeMount
}
-// deleteConfigWebhookVolumeAndMounts deletes the volume and volumeMounts added
+// deleteConfigWebhookVolumesAndMounts deletes the volumes and volumeMounts added
// by the config webhook. Returns a boolean that indicates if the pod was
// mutated.
-func deleteConfigWebhookVolumeAndMounts(pod *corev1.Pod) bool {
- mutated := false
-
+func deleteConfigWebhookVolumesAndMounts(pod *corev1.Pod) bool {
+ originalNumberOfVolumes := len(pod.Spec.Volumes)
// Delete the volume added by the config webhook
- for i, vol := range pod.Spec.Volumes {
- if vol.Name == configWebhook.DatadogVolumeName {
- pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
- mutated = true
- break
- }
- }
+ pod.Spec.Volumes = slices.DeleteFunc(
+ pod.Spec.Volumes,
+ func(volume corev1.Volume) bool {
+ return slices.Contains(volumeNamesInjectedByConfigWebhook, volume.Name)
+ },
+ )
+ mutated := len(pod.Spec.Volumes) != originalNumberOfVolumes
deleted := deleteConfigWebhookVolumeMounts(pod.Spec.Containers)
mutated = mutated || deleted
@@ -204,16 +207,11 @@ func deleteConfigWebhookVolumeMounts(containers []corev1.Container) bool {
mutated := false
for i, container := range containers {
- for j, volMount := range container.VolumeMounts {
- if volMount.Name == configWebhook.DatadogVolumeName {
- containers[i].VolumeMounts = append(
- containers[i].VolumeMounts[:j],
- containers[i].VolumeMounts[j+1:]...,
- )
- mutated = true
- break
- }
- }
+ originalNumberOfVolMounts := len(container.VolumeMounts)
+ containers[i].VolumeMounts = slices.DeleteFunc(container.VolumeMounts, func(volMount corev1.VolumeMount) bool {
+ return slices.Contains(volumeNamesInjectedByConfigWebhook, volMount.Name)
+ })
+ mutated = mutated || len(container.VolumeMounts) != originalNumberOfVolMounts
}
return mutated
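
The providers.go rewrite above replaces the index-based `append(s[:i], s[i+1:]...)` loops with `slices.DeleteFunc`, which also deletes every matching volume rather than stopping at the first hit, and detects mutation by comparing lengths. A self-contained sketch of the pattern, with the volume names reduced to plain strings for illustration:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	injectedByConfigWebhook := []string{"datadog", "datadog-dogstatsd", "datadog-trace-agent"}
	volumes := []string{"app-data", "datadog-dogstatsd", "cache", "datadog-trace-agent"}

	// DeleteFunc removes every element for which the predicate returns true.
	before := len(volumes)
	volumes = slices.DeleteFunc(volumes, func(name string) bool {
		return slices.Contains(injectedByConfigWebhook, name)
	})
	mutated := len(volumes) != before

	fmt.Println(volumes, "mutated:", mutated) // [app-data cache] mutated: true
}
```

`slices.DeleteFunc` (standard library since Go 1.21) returns the shortened slice, so reassigning the result, as the webhook code does, is required.
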
diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go
index ee8ceb1a544f5..236d9d3257031 100644
--- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go
+++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go
@@ -59,7 +59,6 @@ func TestProviderIsSupported(t *testing.T) {
func TestApplyProviderOverrides(t *testing.T) {
mockConfig := configmock.New(t)
- hostPathType := corev1.HostPathDirectoryOrCreate
tests := []struct {
name string
@@ -170,7 +169,7 @@ func TestApplyProviderOverrides(t *testing.T) {
{
// This test checks that the volume and volume mounts set by the
			// config webhook are replaced by ones that work on Fargate.
- name: "fargate provider - with volume set by the config webhook",
+ name: "fargate provider - with volume set by the config webhook (when the type is not socket)",
provider: "fargate",
basePod: &corev1.Pod{
Spec: corev1.PodSpec{
@@ -201,7 +200,7 @@ func TestApplyProviderOverrides(t *testing.T) {
Name: "datadog",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
- Type: &hostPathType,
+ Type: pointer.Ptr(corev1.HostPathDirectoryOrCreate),
Path: "/var/run/datadog",
},
},
@@ -276,6 +275,134 @@ func TestApplyProviderOverrides(t *testing.T) {
expectError: false,
expectMutated: true,
},
+ {
+ // Same as the previous test, but this time the injected volumes are
+ // of socket type.
+ name: "fargate provider - with volumes set by the config webhook (when the type is socket)",
+ provider: "fargate",
+ basePod: &corev1.Pod{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "app-container",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "datadog-dogstatsd",
+ MountPath: "/var/run/datadog/dsd.socket",
+ ReadOnly: true,
+ },
+ {
+ Name: "datadog-trace-agent",
+ MountPath: "/var/run/datadog/apm.socket",
+ ReadOnly: true,
+ },
+ },
+ },
+ {
+ Name: agentSidecarContainerName,
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "datadog-dogstatsd",
+ MountPath: "/var/run/datadog/dsd.socket",
+ ReadOnly: true,
+ },
+ {
+ Name: "datadog-trace-agent",
+ MountPath: "/var/run/datadog/apm.socket",
+ ReadOnly: true,
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "datadog-dogstatsd",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/run/datadog/dsd.socket",
+ Type: pointer.Ptr(corev1.HostPathSocket),
+ },
+ },
+ },
+ {
+ Name: "datadog-trace-agent",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/run/datadog/apm.socket",
+ Type: pointer.Ptr(corev1.HostPathSocket),
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedPodAfterOverride: &corev1.Pod{
+ ObjectMeta: v1.ObjectMeta{
+ Annotations: map[string]string{
+ mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets",
+ },
+ },
+ Spec: corev1.PodSpec{
+ ShareProcessNamespace: pointer.Ptr(true),
+ Containers: []corev1.Container{
+ {
+ Name: "app-container",
+ Env: []corev1.EnvVar{
+ {
+ Name: "DD_TRACE_AGENT_URL",
+ Value: "unix:///var/run/datadog/apm.socket",
+ },
+ {
+ Name: "DD_DOGSTATSD_URL",
+ Value: "unix:///var/run/datadog/dsd.socket",
+ },
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "ddsockets",
+ MountPath: "/var/run/datadog",
+ ReadOnly: false,
+ },
+ },
+ },
+ {
+ Name: agentSidecarContainerName,
+ Env: []corev1.EnvVar{
+ {
+ Name: "DD_EKS_FARGATE",
+ Value: "true",
+ },
+ {
+ Name: "DD_APM_RECEIVER_SOCKET",
+ Value: "/var/run/datadog/apm.socket",
+ },
+ {
+ Name: "DD_DOGSTATSD_SOCKET",
+ Value: "/var/run/datadog/dsd.socket",
+ },
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "ddsockets",
+ MountPath: "/var/run/datadog",
+ ReadOnly: false,
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "ddsockets",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ },
+ },
+ },
+ expectError: false,
+ expectMutated: true,
+ },
{
name: "unsupported provider",
provider: "foo-provider",
diff --git a/pkg/clusteragent/admission/mutate/config/config.go b/pkg/clusteragent/admission/mutate/config/config.go
index 0d43bdfb3c229..8321f8f531e15 100644
--- a/pkg/clusteragent/admission/mutate/config/config.go
+++ b/pkg/clusteragent/admission/mutate/config/config.go
@@ -50,9 +50,15 @@ const (
socket = "socket"
service = "service"
- // DatadogVolumeName is the name of the volume used to mount the socket
+ // DatadogVolumeName is the name of the volume used to mount the sockets when the volume source is a directory
DatadogVolumeName = "datadog"
+ // TraceAgentSocketVolumeName is the name of the volume used to mount the trace agent socket
+ TraceAgentSocketVolumeName = "datadog-trace-agent"
+
+ // DogstatsdSocketVolumeName is the name of the volume used to mount the dogstatsd socket
+ DogstatsdSocketVolumeName = "datadog-dogstatsd"
+
webhookName = "agent_config"
)
@@ -184,15 +190,10 @@ func (w *Webhook) inject(pod *corev1.Pod, _ string, _ dynamic.Interface) (bool,
case service:
injectedConfig = common.InjectEnv(pod, agentHostServiceEnvVar)
case socket:
- volume, volumeMount := buildVolume(DatadogVolumeName, config.Datadog().GetString("admission_controller.inject_config.socket_path"), true)
- injectedVol := common.InjectVolume(pod, volume, volumeMount)
- if injectedVol {
- common.MarkVolumeAsSafeToEvictForAutoscaler(pod, DatadogVolumeName)
- }
-
+ injectedVolumes := injectSocketVolumes(pod)
injectedEnv := common.InjectEnv(pod, traceURLSocketEnvVar)
injectedEnv = common.InjectEnv(pod, dogstatsdURLSocketEnvVar) || injectedEnv
- injectedConfig = injectedEnv || injectedVol
+ injectedConfig = injectedVolumes || injectedEnv
default:
log.Errorf("invalid injection mode %q", w.mode)
return false, errors.New(metrics.InvalidInput)
@@ -249,14 +250,13 @@ func injectExternalDataEnvVar(pod *corev1.Pod) (injected bool) {
return
}
-func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1.VolumeMount) {
- pathType := corev1.HostPathDirectoryOrCreate
+func buildVolume(volumeName, path string, hostpathType corev1.HostPathType, readOnly bool) (corev1.Volume, corev1.VolumeMount) {
volume := corev1.Volume{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: path,
- Type: &pathType,
+ Type: &hostpathType,
},
},
}
@@ -269,3 +269,52 @@ func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1.
return volume, volumeMount
}
+
+// injectSocketVolumes injects the volumes for the dogstatsd and trace agent
+// sockets.
+//
+// The type of the volume injected can be either a directory or a socket
+// depending on the configuration. They offer different trade-offs. Using a
+// socket ensures no lost traces or dogstatsd metrics but can cause the pod to
+// wait if the agent has issues that prevent it from creating the sockets.
+//
+// This function returns true if at least one volume was injected.
+func injectSocketVolumes(pod *corev1.Pod) bool {
+ var injectedVolNames []string
+
+ if config.Datadog().GetBool("admission_controller.inject_config.type_socket_volumes") {
+ volumes := map[string]string{
+ DogstatsdSocketVolumeName: strings.TrimPrefix(
+ config.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), "unix://",
+ ),
+ TraceAgentSocketVolumeName: strings.TrimPrefix(
+ config.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), "unix://",
+ ),
+ }
+
+ for volumeName, volumePath := range volumes {
+ volume, volumeMount := buildVolume(volumeName, volumePath, corev1.HostPathSocket, true)
+ injectedVol := common.InjectVolume(pod, volume, volumeMount)
+ if injectedVol {
+ injectedVolNames = append(injectedVolNames, volumeName)
+ }
+ }
+ } else {
+ volume, volumeMount := buildVolume(
+ DatadogVolumeName,
+ config.Datadog().GetString("admission_controller.inject_config.socket_path"),
+ corev1.HostPathDirectoryOrCreate,
+ true,
+ )
+ injectedVol := common.InjectVolume(pod, volume, volumeMount)
+ if injectedVol {
+ injectedVolNames = append(injectedVolNames, DatadogVolumeName)
+ }
+ }
+
+ for _, volName := range injectedVolNames {
+ common.MarkVolumeAsSafeToEvictForAutoscaler(pod, volName)
+ }
+
+ return len(injectedVolNames) > 0
+}
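
As a reference for the shapes injectSocketVolumes produces: with admission_controller.inject_config.type_socket_volumes enabled it builds one read-only HostPathSocket volume per socket (paths from the dogstatsd_socket and trace_agent_socket settings, unix:// prefix stripped); otherwise it builds the single directory volume from socket_path. A minimal sketch, assuming k8s.io/api/core/v1 is available:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// hostPathVolume mirrors the volume half of buildVolume above.
func hostPathVolume(name, path string, hostPathType corev1.HostPathType) corev1.Volume {
	return corev1.Volume{
		Name: name,
		VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: path, Type: &hostPathType},
		},
	}
}

func main() {
	// type_socket_volumes: true -> one volume per socket.
	dsd := hostPathVolume("datadog-dogstatsd", "/var/run/datadog/dsd.socket", corev1.HostPathSocket)
	apm := hostPathVolume("datadog-trace-agent", "/var/run/datadog/apm.socket", corev1.HostPathSocket)

	// type_socket_volumes: false -> a single directory volume, created on demand.
	dir := hostPathVolume("datadog", "/var/run/datadog", corev1.HostPathDirectoryOrCreate)

	for _, v := range []corev1.Volume{dsd, apm, dir} {
		fmt.Printf("%s -> %s (%s)\n", v.Name, v.VolumeSource.HostPath.Path, *v.VolumeSource.HostPath.Type)
	}
}
```

The trade-off described in the doc comment follows from the HostPath types: HostPathSocket requires the socket to already exist on the node before the container can start, while HostPathDirectoryOrCreate lets the pod start even when the agent has not created the sockets yet.
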
diff --git a/pkg/clusteragent/admission/mutate/config/config_test.go b/pkg/clusteragent/admission/mutate/config/config_test.go
index c8dd5437edf85..6321412ac1d0b 100644
--- a/pkg/clusteragent/admission/mutate/config/config_test.go
+++ b/pkg/clusteragent/admission/mutate/config/config_test.go
@@ -10,6 +10,7 @@ package config
import (
"encoding/json"
"os"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -307,6 +308,7 @@ func TestInjectSocket(t *testing.T) {
injected, err := webhook.inject(pod, "", nil)
assert.Nil(t, err)
assert.True(t, injected)
+
assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket"))
assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", "unix:///var/run/datadog/dsd.socket"))
assert.Equal(t, pod.Spec.Containers[0].VolumeMounts[0].MountPath, "/var/run/datadog")
@@ -318,6 +320,67 @@ func TestInjectSocket(t *testing.T) {
assert.Equal(t, "datadog", pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation])
}
+func TestInjectSocket_VolumeTypeSocket(t *testing.T) {
+ pod := mutatecommon.FakePodWithContainer("foo-pod", corev1.Container{})
+ pod = mutatecommon.WithLabels(pod, map[string]string{"admission.datadoghq.com/enabled": "true", "admission.datadoghq.com/config.mode": "socket"})
+ wmeta := fxutil.Test[workloadmeta.Component](
+ t,
+ core.MockBundle(),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ fx.Replace(config.MockParams{
+ Overrides: map[string]interface{}{"admission_controller.inject_config.type_socket_volumes": true},
+ }),
+ )
+ webhook := NewWebhook(wmeta, autoinstrumentation.GetInjectionFilter())
+ injected, err := webhook.inject(pod, "", nil)
+ assert.Nil(t, err)
+ assert.True(t, injected)
+
+ assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket"))
+ assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", "unix:///var/run/datadog/dsd.socket"))
+
+ expectedVolumeMounts := []corev1.VolumeMount{
+ {
+ Name: "datadog-dogstatsd",
+ MountPath: "/var/run/datadog/dsd.socket",
+ ReadOnly: true,
+ },
+ {
+ Name: "datadog-trace-agent",
+ MountPath: "/var/run/datadog/apm.socket",
+ ReadOnly: true,
+ },
+ }
+ assert.ElementsMatch(t, pod.Spec.Containers[0].VolumeMounts, expectedVolumeMounts)
+
+ expectedVolumes := []corev1.Volume{
+ {
+ Name: "datadog-dogstatsd",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/run/datadog/dsd.socket",
+ Type: pointer.Ptr(corev1.HostPathSocket),
+ },
+ },
+ },
+ {
+ Name: "datadog-trace-agent",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/run/datadog/apm.socket",
+ Type: pointer.Ptr(corev1.HostPathSocket),
+ },
+ },
+ },
+ }
+ assert.ElementsMatch(t, pod.Spec.Volumes, expectedVolumes)
+
+ safeToEvictVolumes := strings.Split(pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation], ",")
+ assert.Len(t, safeToEvictVolumes, 2)
+ assert.Contains(t, safeToEvictVolumes, "datadog-dogstatsd")
+ assert.Contains(t, safeToEvictVolumes, "datadog-trace-agent")
+}
+
func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -339,7 +402,11 @@ func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) {
VolumeMounts: []corev1.VolumeMount{
{
Name: "foo",
- MountPath: "/var/run/datadog",
+ MountPath: "/var/run/datadog/dsd.socket",
+ },
+ {
+ Name: "bar",
+ MountPath: "/var/run/datadog/apm.socket",
},
},
},
diff --git a/pkg/clusteragent/autoscaling/workload/controller.go b/pkg/clusteragent/autoscaling/workload/controller.go
index fb874e4bd3483..27ae6f77070aa 100644
--- a/pkg/clusteragent/autoscaling/workload/controller.go
+++ b/pkg/clusteragent/autoscaling/workload/controller.go
@@ -26,7 +26,10 @@ import (
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling"
"github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload/model"
+ "github.com/DataDog/datadog-agent/pkg/util/kubernetes"
"github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common"
)
const (
@@ -272,22 +275,16 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string
	// Reaching this point, we had no error in processing, so clear any previously recorded global error
podAutoscalerInternal.SetError(nil)
- // Now that everything is synced, we can perform the actual processing
- result, err := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal)
-
- // Update status based on latest state
- statusErr := c.updatePodAutoscalerStatus(ctx, podAutoscalerInternal, podAutoscaler)
- if statusErr != nil {
- log.Errorf("Failed to update status for PodAutoscaler: %s/%s, err: %v", ns, name, statusErr)
-
- // We want to return the status error if none to count in the requeue retries.
- if err == nil {
- err = statusErr
- }
+ // Validate autoscaler requirements
+ validationErr := c.validateAutoscaler(podAutoscaler)
+ if validationErr != nil {
+ podAutoscalerInternal.SetError(validationErr)
+ return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, validationErr, podAutoscalerInternal, podAutoscaler)
}
- c.store.UnlockSet(key, podAutoscalerInternal, c.ID)
- return result, err
+ // Now that everything is synced, we can perform the actual processing
+ result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal)
+ return result, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, scalingErr, podAutoscalerInternal, podAutoscaler)
}
func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) {
@@ -389,3 +386,42 @@ func (c *Controller) deletePodAutoscaler(ns, name string) error {
}
return nil
}
+
+func (c *Controller) validateAutoscaler(podAutoscaler *datadoghq.DatadogPodAutoscaler) error {
+ // Check that targetRef is not set to the cluster agent
+ clusterAgentPodName, err := common.GetSelfPodName()
+ if err != nil {
+ return fmt.Errorf("Unable to get the cluster agent pod name: %w", err)
+ }
+
+ var resourceName string
+ switch owner := podAutoscaler.Spec.TargetRef.Kind; owner {
+ case "Deployment":
+ resourceName = kubernetes.ParseDeploymentForPodName(clusterAgentPodName)
+ case "ReplicaSet":
+ resourceName = kubernetes.ParseReplicaSetForPodName(clusterAgentPodName)
+ }
+
+ clusterAgentNs := common.GetMyNamespace()
+
+ if podAutoscaler.Namespace == clusterAgentNs && podAutoscaler.Spec.TargetRef.Name == resourceName {
+ return fmt.Errorf("Autoscaling target cannot be set to the cluster agent")
+ }
+ return nil
+}
+
+func (c *Controller) updateAutoscalerStatusAndUnlock(ctx context.Context, key, ns, name string, err error, podAutoscalerInternal model.PodAutoscalerInternal, podAutoscaler *datadoghq.DatadogPodAutoscaler) error {
+ // Update status based on latest state
+ statusErr := c.updatePodAutoscalerStatus(ctx, podAutoscalerInternal, podAutoscaler)
+ if statusErr != nil {
+ log.Errorf("Failed to update status for PodAutoscaler: %s/%s, err: %v", ns, name, statusErr)
+
+	// If no other error occurred, return the status error so it still counts toward the requeue retries.
+ if err == nil {
+ err = statusErr
+ }
+ }
+
+ c.store.UnlockSet(key, podAutoscalerInternal, c.ID)
+ return err
+}
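
validateAutoscaler guards against an autoscaler targeting the cluster agent itself by deriving the agent's own Deployment and ReplicaSet names from its pod name (DD_POD_NAME in the test below). A hypothetical sketch of that derivation; parentName is an illustrative stand-in for the real pkg/util/kubernetes helpers, which likely do stricter validation:

```go
package main

import (
	"fmt"
	"strings"
)

// parentName strips trailing name segments: pods created through a Deployment
// are named <deployment>-<rs-hash>-<suffix>, so stripping one segment yields
// the ReplicaSet name and stripping two yields the Deployment name.
func parentName(podName string, segmentsToStrip int) string {
	parts := strings.Split(podName, "-")
	if len(parts) <= segmentsToStrip {
		return ""
	}
	return strings.Join(parts[:len(parts)-segmentsToStrip], "-")
}

func main() {
	pod := "datadog-agent-cluster-agent-7dbf798595-tp9lg"
	fmt.Println(parentName(pod, 1)) // datadog-agent-cluster-agent-7dbf798595 (ReplicaSet)
	fmt.Println(parentName(pod, 2)) // datadog-agent-cluster-agent (Deployment)
}
```

A targetRef whose kind and name resolve to either of these, in the cluster agent's own namespace, is rejected before any scaling work happens.
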
diff --git a/pkg/clusteragent/autoscaling/workload/controller_test.go b/pkg/clusteragent/autoscaling/workload/controller_test.go
index 3e9b8a2eace6e..be6233bf73825 100644
--- a/pkg/clusteragent/autoscaling/workload/controller_test.go
+++ b/pkg/clusteragent/autoscaling/workload/controller_test.go
@@ -8,6 +8,8 @@
package workload
import (
+ "errors"
+ "fmt"
"testing"
"time"
@@ -26,6 +28,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling"
"github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload/model"
+ "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common"
)
type fixture struct {
@@ -228,3 +231,121 @@ func TestLeaderCreateDeleteRemote(t *testing.T) {
f.RunControllerSync(true, "default/dpa-0")
assert.Len(t, f.store.GetAll(), 0)
}
+
+func TestDatadogPodAutoscalerTargetingClusterAgentErrors(t *testing.T) {
+ tests := []struct {
+ name string
+ targetRef autoscalingv2.CrossVersionObjectReference
+ }{
+ {
+ "target set to cluster agent deployment",
+ autoscalingv2.CrossVersionObjectReference{
+ Kind: "Deployment",
+ Name: "datadog-agent-cluster-agent",
+ APIVersion: "apps/v1",
+ },
+ },
+ {
+ "target set to cluster agent replicaset",
+ autoscalingv2.CrossVersionObjectReference{
+ Kind: "ReplicaSet",
+ Name: "datadog-agent-cluster-agent-7dbf798595",
+ APIVersion: "apps/v1",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ testTime := time.Now()
+ f := newFixture(t, testTime)
+
+ t.Setenv("DD_POD_NAME", "datadog-agent-cluster-agent-7dbf798595-tp9lg")
+ currentNs := common.GetMyNamespace()
+ id := fmt.Sprintf("%s/dpa-dca", currentNs)
+
+ dpaSpec := datadoghq.DatadogPodAutoscalerSpec{
+ TargetRef: tt.targetRef,
+ // Local owner means .Spec source of truth is K8S
+ Owner: datadoghq.DatadogPodAutoscalerLocalOwner,
+ }
+
+ dpa, dpaTyped := newFakePodAutoscaler(currentNs, "dpa-dca", 1, dpaSpec, datadoghq.DatadogPodAutoscalerStatus{})
+ f.InformerObjects = append(f.InformerObjects, dpa)
+
+ expectedDPAError := &datadoghq.DatadogPodAutoscaler{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "DatadogPodAutoscaler",
+ APIVersion: "datadoghq.com/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dpa-dca",
+ Namespace: currentNs,
+ Generation: 1,
+ UID: dpa.GetUID(),
+ },
+ Spec: datadoghq.DatadogPodAutoscalerSpec{
+ TargetRef: autoscalingv2.CrossVersionObjectReference{
+ Kind: "",
+ Name: "",
+ APIVersion: "",
+ },
+ Owner: "",
+ },
+ Status: datadoghq.DatadogPodAutoscalerStatus{
+ Conditions: []datadoghq.DatadogPodAutoscalerCondition{
+ {
+ Type: datadoghq.DatadogPodAutoscalerErrorCondition,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.NewTime(testTime),
+ Reason: "Autoscaling target cannot be set to the cluster agent",
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerActiveCondition,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerHorizontalAbleToRecommendCondition,
+ Status: corev1.ConditionUnknown,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerVerticalAbleToRecommendCondition,
+ Status: corev1.ConditionUnknown,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerHorizontalScalingLimitedCondition,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerHorizontalAbleToScaleCondition,
+ Status: corev1.ConditionUnknown,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ {
+ Type: datadoghq.DatadogPodAutoscalerVerticalAbleToApply,
+ Status: corev1.ConditionUnknown,
+ LastTransitionTime: metav1.NewTime(testTime),
+ },
+ },
+ },
+ }
+ expectedUnstructuredError, err := autoscaling.ToUnstructured(expectedDPAError)
+ assert.NoError(t, err)
+ f.RunControllerSync(true, id)
+
+ f.Objects = append(f.Objects, dpaTyped)
+ f.Actions = nil
+
+ f.ExpectUpdateStatusAction(expectedUnstructuredError)
+ f.RunControllerSync(true, id)
+ assert.Len(t, f.store.GetAll(), 1)
+ pai, found := f.store.Get(id)
+ assert.Truef(t, found, "Expected to find DatadogPodAutoscaler in store")
+ assert.Equal(t, errors.New("Autoscaling target cannot be set to the cluster agent"), pai.Error())
+ })
+ }
+}
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
index 2509961cdcd01..7054a232e7282 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
@@ -260,13 +260,39 @@ func getInvolvedObjectTags(involvedObject v1.ObjectReference, taggerInstance tag
fmt.Sprintf("namespace:%s", involvedObject.Namespace),
)
- namespaceEntityID := fmt.Sprintf("kubernetes_metadata://%s", string(util.GenerateKubeMetadataEntityID("", "namespaces", "", involvedObject.Namespace)))
+ namespaceEntityID := types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", involvedObject.Namespace))).String()
namespaceEntity, err := taggerInstance.GetEntity(namespaceEntityID)
if err == nil {
tagList = append(tagList, namespaceEntity.GetTags(types.HighCardinality)...)
}
}
+ var entityID string
+
+ switch involvedObject.Kind {
+ case podKind:
+ entityID = types.NewEntityID(types.KubernetesPodUID, string(involvedObject.UID)).String()
+ case deploymentKind:
+ entityID = types.NewEntityID(types.KubernetesDeployment, fmt.Sprintf("%s/%s", involvedObject.Namespace, involvedObject.Name)).String()
+ default:
+		var apiGroup string
+		apiVersionParts := strings.Split(involvedObject.APIVersion, "/")
+		if len(apiVersionParts) == 2 {
+			apiGroup = apiVersionParts[0]
+		}
+ resourceType := strings.ToLower(involvedObject.Kind) + "s"
+ entityID = types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID(apiGroup, resourceType, involvedObject.Namespace, involvedObject.Name))).String()
+ }
+
+ entity, err := taggerInstance.GetEntity(entityID)
+ if err == nil {
+ tagList = append(tagList, entity.GetTags(types.HighCardinality)...)
+ } else {
+ log.Debugf("error getting entity for entity ID '%s': tags may be missing", entityID)
+ }
+
kindTag := getKindTag(involvedObject.Kind, involvedObject.Name)
if kindTag != "" {
tagList = append(tagList, kindTag)
@@ -276,8 +302,9 @@ func getInvolvedObjectTags(involvedObject v1.ObjectReference, taggerInstance tag
}
const (
- podKind = "Pod"
- nodeKind = "Node"
+ podKind = "Pod"
+ nodeKind = "Node"
+ deploymentKind = "Deployment"
)
func getEventHostInfo(clusterName string, ev *v1.Event) eventHostInfo {
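
The new switch above picks a tagger entity ID whose shape depends on the involved object's kind. A sketch of the three string forms, matching the fixtures in the updated test below (the prefix spellings are taken from those fixtures, not from the tagger's types package itself):

```go
package main

import (
	"fmt"
	"strings"
)

// entityIDForEvent mirrors the kind switch above as plain strings.
func entityIDForEvent(kind, uid, apiVersion, namespace, name string) string {
	switch kind {
	case "Pod":
		return "kubernetes_pod_uid://" + uid
	case "Deployment":
		return fmt.Sprintf("deployment://%s/%s", namespace, name)
	default:
		// Everything else falls back to a kubernetes_metadata ID built from
		// the API group, a lowercased plural kind, namespace, and name.
		apiGroup := ""
		if parts := strings.Split(apiVersion, "/"); len(parts) == 2 {
			apiGroup = parts[0]
		}
		resourceType := strings.ToLower(kind) + "s"
		return fmt.Sprintf("kubernetes_metadata://%s/%s/%s/%s", apiGroup, resourceType, namespace, name)
	}
}

func main() {
	fmt.Println(entityIDForEvent("Pod", "nginx", "v1", "default", "my-pod"))
	// kubernetes_pod_uid://nginx
	fmt.Println(entityIDForEvent("Deployment", "", "apps/v1", "workload-redis", "my-deployment-1"))
	// deployment://workload-redis/my-deployment-1
	fmt.Println(entityIDForEvent("ResourceType", "", "api-group/v1", "default", "generic-resource"))
	// kubernetes_metadata://api-group/resourcetypes/default/generic-resource
}
```
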
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
index 693751cb20d14..34aff34230dda 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
@@ -59,7 +59,11 @@ func Test_getInvolvedObjectTags(t *testing.T) {
telemetryStore := telemetry.NewStore(telemetryComponent)
cfg := configmock.New(t)
taggerInstance := local.NewFakeTagger(cfg, telemetryStore)
+ taggerInstance.SetTags("kubernetes_pod_uid://nginx", "workloadmeta-kubernetes_pod", nil, []string{"additional_pod_tag:nginx"}, nil, nil)
+ taggerInstance.SetTags("deployment://workload-redis/my-deployment-1", "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-1"}, nil, nil)
+ taggerInstance.SetTags("deployment://default/my-deployment-2", "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-2"}, nil, nil)
taggerInstance.SetTags("kubernetes_metadata:///namespaces//default", "workloadmeta-kubernetes_node", []string{"team:container-int"}, nil, nil, nil)
+ taggerInstance.SetTags("kubernetes_metadata://api-group/resourcetypes/default/generic-resource", "workloadmeta-kubernetes_resource", []string{"generic_tag:generic-resource"}, nil, nil, nil)
tests := []struct {
name string
involvedObject v1.ObjectReference
@@ -68,6 +72,7 @@ func Test_getInvolvedObjectTags(t *testing.T) {
{
name: "get pod basic tags",
involvedObject: v1.ObjectReference{
+ UID: "nginx",
Kind: "Pod",
Name: "my-pod",
Namespace: "my-namespace",
@@ -80,11 +85,13 @@ func Test_getInvolvedObjectTags(t *testing.T) {
"kube_namespace:my-namespace",
"namespace:my-namespace",
"pod_name:my-pod",
+ "additional_pod_tag:nginx",
},
},
{
name: "get pod namespace tags",
involvedObject: v1.ObjectReference{
+ UID: "nginx",
Kind: "Pod",
Name: "my-pod",
Namespace: "default",
@@ -98,6 +105,63 @@ func Test_getInvolvedObjectTags(t *testing.T) {
"namespace:default",
"team:container-int", // this tag is coming from the namespace
"pod_name:my-pod",
+ "additional_pod_tag:nginx",
+ },
+ },
+ {
+ name: "get deployment basic tags",
+ involvedObject: v1.ObjectReference{
+ Kind: "Deployment",
+ Name: "my-deployment-1",
+ Namespace: "workload-redis",
+ },
+ tags: []string{
+ "kube_kind:Deployment",
+ "kube_name:my-deployment-1",
+ "kubernetes_kind:Deployment",
+ "name:my-deployment-1",
+ "kube_namespace:workload-redis",
+ "namespace:workload-redis",
+ "kube_deployment:my-deployment-1",
+ "deployment_tag:redis-1",
+ },
+ },
+ {
+ name: "get deployment namespace tags",
+ involvedObject: v1.ObjectReference{
+ Kind: "Deployment",
+ Name: "my-deployment-2",
+ Namespace: "default",
+ },
+ tags: []string{
+ "kube_kind:Deployment",
+ "kube_name:my-deployment-2",
+ "kubernetes_kind:Deployment",
+ "name:my-deployment-2",
+ "kube_namespace:default",
+ "namespace:default",
+ "kube_deployment:my-deployment-2",
+ "team:container-int", // this tag is coming from the namespace
+ "deployment_tag:redis-2",
+ },
+ },
+ {
+ name: "get tags for any metadata resource",
+ involvedObject: v1.ObjectReference{
+ Kind: "ResourceType",
+ Name: "generic-resource",
+ Namespace: "default",
+ APIVersion: "api-group/v1",
+ },
+ tags: []string{
+ "kube_kind:ResourceType",
+ "kube_name:generic-resource",
+ "kubernetes_kind:ResourceType",
+ "name:generic-resource",
+ "kube_namespace:default",
+ "namespace:default",
+ "team:container-int", // this tag is coming from the namespace
+ "generic_tag:generic-resource",
},
},
}
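Note on the fixtures above: the SetTags calls at the top of this test encode the entity-ID formats the check resolves tags from. A minimal sketch of those formats (the helper names are hypothetical; the formats come straight from the test data):

package main

import "fmt"

// podEntityID builds the pod entity ID, e.g. kubernetes_pod_uid://nginx.
func podEntityID(uid string) string {
	return "kubernetes_pod_uid://" + uid
}

// deploymentEntityID builds the deployment entity ID,
// e.g. deployment://workload-redis/my-deployment-1.
func deploymentEntityID(namespace, name string) string {
	return fmt.Sprintf("deployment://%s/%s", namespace, name)
}

// metadataEntityID builds the generic metadata entity ID,
// e.g. kubernetes_metadata://api-group/resourcetypes/default/generic-resource.
func metadataEntityID(group, resourceType, namespace, name string) string {
	return fmt.Sprintf("kubernetes_metadata://%s/%s/%s/%s", group, resourceType, namespace, name)
}

func main() {
	fmt.Println(deploymentEntityID("default", "my-deployment-2"))
}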
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
index d6ae0ca1a4a0c..8cd906d244ff3 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
@@ -151,15 +151,14 @@ func (c *unbundledTransformer) buildEventTags(ev *v1.Event, involvedObject v1.Ob
tagsAccumulator.Append(getInvolvedObjectTags(involvedObject, c.taggerInstance)...)
// Finally tags from the tagger
- c.getTagsFromTagger(involvedObject, tagsAccumulator)
+ c.getTagsFromTagger(tagsAccumulator)
tagsAccumulator.SortUniq()
return tagsAccumulator.Get()
}
-// getTagsFromTagger add to the TagsAccumulator associated object tags from the tagger.
-// For now only Pod object kind is supported.
-func (c *unbundledTransformer) getTagsFromTagger(obj v1.ObjectReference, tagsAcc tagset.TagsAccumulator) {
+// getTagsFromTagger adds global tags from the tagger to the TagsAccumulator.
+func (c *unbundledTransformer) getTagsFromTagger(tagsAcc tagset.TagsAccumulator) {
if c.taggerInstance == nil {
return
}
@@ -169,20 +168,6 @@ func (c *unbundledTransformer) getTagsFromTagger(obj v1.ObjectReference, tagsAcc
log.Debugf("error getting global tags: %s", err)
}
tagsAcc.Append(globalTags...)
-
- switch obj.Kind {
- case podKind:
- entityID := fmt.Sprintf("kubernetes_pod_uid://%s", obj.UID)
- entity, err := c.taggerInstance.GetEntity(entityID)
- if err == nil {
- // we can get high Cardinality because tags on events is seemless.
- tagsAcc.Append(entity.GetTags(types.HighCardinality)...)
- } else {
- log.Debugf("error getting pod entity for entity ID: %s, pod tags may be missing", err)
- }
-
- default:
- }
}
func (c *unbundledTransformer) shouldCollect(ev *v1.Event) bool {
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
index 0c1e03893b675..768f0812e3460 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
@@ -901,7 +901,6 @@ func TestUnbundledEventsTransformFiltering(t *testing.T) {
func TestGetTagsFromTagger(t *testing.T) {
taggerInstance := taggerimpl.SetupFakeTagger(t)
- taggerInstance.SetTags("kubernetes_pod_uid://nginx", "workloadmeta-kubernetes_pod", nil, []string{"pod_name:nginx"}, nil, nil)
taggerInstance.SetGlobalTags([]string{"global:here"}, nil, nil, nil)
tests := []struct {
@@ -910,7 +909,7 @@ func TestGetTagsFromTagger(t *testing.T) {
expectedTags *tagset.HashlessTagsAccumulator
}{
{
- name: "accumulates basic pod tags",
+ name: "accumulates global tags",
obj: v1.ObjectReference{
UID: "redis",
Kind: "Pod",
@@ -919,16 +918,6 @@ func TestGetTagsFromTagger(t *testing.T) {
},
expectedTags: tagset.NewHashlessTagsAccumulatorFromSlice([]string{"global:here"}),
},
- {
- name: "add tagger pod tags",
- obj: v1.ObjectReference{
- UID: "nginx",
- Kind: "Pod",
- Namespace: "default",
- Name: "nginx",
- },
- expectedTags: tagset.NewHashlessTagsAccumulatorFromSlice([]string{"global:here", "pod_name:nginx"}),
- },
}
for _, tt := range tests {
@@ -938,7 +927,7 @@ func TestGetTagsFromTagger(t *testing.T) {
}
transformer := newUnbundledTransformer("test-cluster", taggerInstance, collectedTypes, false, false)
accumulator := tagset.NewHashlessTagsAccumulator()
- transformer.(*unbundledTransformer).getTagsFromTagger(tt.obj, accumulator)
+ transformer.(*unbundledTransformer).getTagsFromTagger(accumulator)
assert.Equal(t, tt.expectedTags, accumulator)
})
}
diff --git a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h
index 58c63dfa59636..124081af8be78 100644
--- a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h
+++ b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h
@@ -9,28 +9,32 @@
#include "bpf_tracing.h"
#include "bpf_builtins.h"
-static __always_inline int get_cgroup_name(char *buf, size_t sz) {
- if (!bpf_helper_exists(BPF_FUNC_get_current_task)) {
- return 0;
- }
+static __always_inline int get_cgroup_name_for_task(struct task_struct *task, char *buf, size_t sz) {
bpf_memset(buf, 0, sz);
- struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task();
+ #ifdef COMPILE_CORE
+ enum cgroup_subsys_id___local {
+ memory_cgrp_id___local = 123, /* value doesn't matter */
+ };
+ int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local);
+ #else
+ int cgrp_id = memory_cgrp_id;
+ #endif
-#ifdef COMPILE_CORE
- enum cgroup_subsys_id___local {
- memory_cgrp_id___local = 123, /* value doesn't matter */
- };
- int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local);
-#else
- int cgrp_id = memory_cgrp_id;
-#endif
- const char *name = BPF_CORE_READ(cur_tsk, cgroups, subsys[cgrp_id], cgroup, kn, name);
+ const char *name = BPF_CORE_READ(task, cgroups, subsys[cgrp_id], cgroup, kn, name);
if (bpf_probe_read_kernel(buf, sz, name) < 0) {
return 0;
}
-
return 1;
}
+static __always_inline int get_cgroup_name(char *buf, size_t sz) {
+ if (!bpf_helper_exists(BPF_FUNC_get_current_task)) {
+ return 0;
+ }
+
+ struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task();
+ return get_cgroup_name_for_task(cur_tsk, buf, sz);
+}
+
#endif /* defined(BPF_CGROUP_H) */
diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h
index 6153c6ef6a711..c95008774b44f 100644
--- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h
+++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h
@@ -9,14 +9,14 @@
struct oom_stats {
char cgroup_name[129];
- // Pid of triggering process
- __u32 pid;
// Pid of killed process
- __u32 tpid;
- // Name of triggering process
- char fcomm[TASK_COMM_LEN];
+ __u32 victim_pid;
+ // Pid of triggering process
+ __u32 trigger_pid;
// Name of killed process
- char tcomm[TASK_COMM_LEN];
+ char victim_comm[TASK_COMM_LEN];
+ // Name of triggering process
+ char trigger_comm[TASK_COMM_LEN];
// OOM score of killed process
__s64 score;
// OOM score adjustment of killed process
diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c
index ee2f1a76523a7..35c7b4be7165d 100644
--- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c
+++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c
@@ -25,16 +25,17 @@
* the statistics per pid
*/
-BPF_HASH_MAP(oom_stats, u32, struct oom_stats, 10240)
+BPF_HASH_MAP(oom_stats, u64, struct oom_stats, 10240)
SEC("kprobe/oom_kill_process")
int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) {
struct oom_stats zero = {};
struct oom_stats new = {};
+ u64 ts = bpf_ktime_get_ns();
u32 pid = bpf_get_current_pid_tgid() >> 32;
- bpf_map_update_elem(&oom_stats, &pid, &zero, BPF_NOEXIST);
- struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &pid);
+ bpf_map_update_elem(&oom_stats, &ts, &zero, BPF_NOEXIST);
+ struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &ts);
if (!s) {
return 0;
}
@@ -43,15 +44,14 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) {
// expected a pointer to stack memory. Therefore, we work on stack
// variable and update the map value at the end
bpf_memcpy(&new, s, sizeof(struct oom_stats));
-
- new.pid = pid;
- get_cgroup_name(new.cgroup_name, sizeof(new.cgroup_name));
+ new.trigger_pid = pid;
struct task_struct *p = (struct task_struct *)BPF_CORE_READ(oc, chosen);
if (!p) {
return 0;
}
- BPF_CORE_READ_INTO(&new.tpid, p, pid);
+ get_cgroup_name_for_task(p, new.cgroup_name, sizeof(new.cgroup_name));
+ BPF_CORE_READ_INTO(&new.victim_pid, p, pid);
BPF_CORE_READ_INTO(&new.score, oc, chosen_points);
#ifdef COMPILE_CORE
if (bpf_core_field_exists(p->signal->oom_score_adj)) {
@@ -63,11 +63,11 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) {
bpf_probe_read_kernel(&new.score_adj, sizeof(new.score_adj), &sig->oom_score_adj);
#endif
if (bpf_helper_exists(BPF_FUNC_get_current_comm)) {
- bpf_get_current_comm(new.fcomm, sizeof(new.fcomm));
+ bpf_get_current_comm(new.trigger_comm, sizeof(new.trigger_comm));
}
- BPF_CORE_READ_INTO(&new.tcomm, p, comm);
- new.tcomm[TASK_COMM_LEN - 1] = 0;
+ BPF_CORE_READ_INTO(&new.victim_comm, p, comm);
+ new.victim_comm[TASK_COMM_LEN - 1] = 0;
struct mem_cgroup *memcg = NULL;
#ifdef COMPILE_CORE
diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
index 75797cb5352a0..f7ff6312e769b 100644
--- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
+++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
@@ -132,8 +132,8 @@ func (m *OOMKillCheck) Run() error {
triggerTypeText = "This OOM kill was invoked by the system."
}
tags = append(tags, "trigger_type:"+triggerType)
- tags = append(tags, "trigger_process_name:"+line.FComm)
- tags = append(tags, "process_name:"+line.TComm)
+ tags = append(tags, "trigger_process_name:"+line.TriggerComm)
+ tags = append(tags, "process_name:"+line.VictimComm)
// submit counter metric
sender.Count("oom_kill.oom_process.count", 1, "", tags)
@@ -145,7 +145,7 @@ func (m *OOMKillCheck) Run() error {
SourceTypeName: CheckName,
EventType: CheckName,
AggregationKey: containerID,
- Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.TComm, line.TPid),
+ Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.VictimComm, line.VictimPid),
Tags: tags,
}
@@ -155,10 +155,10 @@ func (m *OOMKillCheck) Run() error {
if line.ScoreAdj != 0 {
oomScoreAdj = fmt.Sprintf(", oom_score_adj: %d", line.ScoreAdj)
}
- if line.Pid == line.TPid {
- fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.FComm, line.Pid, line.Score, oomScoreAdj)
+ if line.VictimPid == line.TriggerPid {
+ fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.VictimComm, line.VictimPid, line.Score, oomScoreAdj)
} else {
- fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.FComm, line.Pid, line.TComm, line.TPid, line.Score, oomScoreAdj)
+ fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.TriggerComm, line.TriggerPid, line.VictimComm, line.VictimPid, line.Score, oomScoreAdj)
}
fmt.Fprintf(&b, "\n The process had reached %d pages in size. \n\n", line.Pages)
b.WriteString(triggerTypeText)
diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go
index 062036afd3cb7..9637c6f16adcf 100644
--- a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go
+++ b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go
@@ -4,14 +4,14 @@
package oomkill
type oomStats struct {
- Cgroup_name [129]byte
- Pid uint32
- Tpid uint32
- Fcomm [16]byte
- Tcomm [16]byte
- Score int64
- Score_adj int16
- Pages uint64
- Memcg_oom uint32
- Pad_cgo_0 [4]byte
+ Cgroup_name [129]byte
+ Victim_pid uint32
+ Trigger_pid uint32
+ Victim_comm [16]byte
+ Trigger_comm [16]byte
+ Score int64
+ Score_adj int16
+ Pages uint64
+ Memcg_oom uint32
+ Pad_cgo_0 [4]byte
}
diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go
index 42272d27dbf7f..f065837bb3094 100644
--- a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go
+++ b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go
@@ -8,13 +8,13 @@ package model
// OOMKillStats contains the statistics of a given socket
type OOMKillStats struct {
- CgroupName string `json:"cgroupName"`
- Pid uint32 `json:"pid"`
- TPid uint32 `json:"tpid"`
- FComm string `json:"fcomm"`
- TComm string `json:"tcomm"`
- Score int64 `json:"score"`
- ScoreAdj int16 `json:"scoreAdj"`
- Pages uint64 `json:"pages"`
- MemCgOOM uint32 `json:"memcgoom"`
+ CgroupName string `json:"cgroupName"`
+ VictimPid uint32 `json:"victimPid"`
+ TriggerPid uint32 `json:"triggerPid"`
+ VictimComm string `json:"victimComm"`
+ TriggerComm string `json:"triggerComm"`
+ Score int64 `json:"score"`
+ ScoreAdj int16 `json:"scoreAdj"`
+ Pages uint64 `json:"pages"`
+ MemCgOOM uint32 `json:"memcgoom"`
}
diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go
index fc9e27a78a31a..e277b3f48381b 100644
--- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go
+++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go
@@ -34,7 +34,7 @@ const oomMapName = "oom_stats"
// Probe is the eBPF side of the OOM Kill check
type Probe struct {
m *manager.Manager
- oomMap *maps.GenericMap[uint32, oomStats]
+ oomMap *maps.GenericMap[uint64, oomStats]
}
// NewProbe creates a [Probe]
@@ -117,7 +117,7 @@ func startOOMKillProbe(buf bytecode.AssetReader, managerOptions manager.Options)
return nil, fmt.Errorf("failed to start manager: %w", err)
}
- oomMap, err := maps.GetMap[uint32, oomStats](m, oomMapName)
+ oomMap, err := maps.GetMap[uint64, oomStats](m, oomMapName)
if err != nil {
return nil, fmt.Errorf("failed to get map '%s': %w", oomMapName, err)
}
@@ -139,19 +139,21 @@ func (k *Probe) Close() {
// GetAndFlush gets the stats
func (k *Probe) GetAndFlush() (results []model.OOMKillStats) {
- var pid uint32
+ var allTimestamps []uint64
+ var ts uint64
var stat oomStats
it := k.oomMap.Iterate()
- for it.Next(&pid, &stat) {
+ for it.Next(&ts, &stat) {
results = append(results, convertStats(stat))
+ allTimestamps = append(allTimestamps, ts)
}
if err := it.Err(); err != nil {
log.Warnf("failed to iterate on OOM stats while flushing: %s", err)
}
- for _, r := range results {
- if err := k.oomMap.Delete(&r.Pid); err != nil {
+ for _, ts := range allTimestamps {
+ if err := k.oomMap.Delete(&ts); err != nil {
log.Warnf("failed to delete stat: %s", err)
}
}
@@ -161,12 +163,12 @@ func (k *Probe) GetAndFlush() (results []model.OOMKillStats) {
func convertStats(in oomStats) (out model.OOMKillStats) {
out.CgroupName = unix.ByteSliceToString(in.Cgroup_name[:])
- out.Pid = in.Pid
- out.TPid = in.Tpid
+ out.VictimPid = in.Victim_pid
+ out.TriggerPid = in.Trigger_pid
out.Score = in.Score
out.ScoreAdj = in.Score_adj
- out.FComm = unix.ByteSliceToString(in.Fcomm[:])
- out.TComm = unix.ByteSliceToString(in.Tcomm[:])
+ out.VictimComm = unix.ByteSliceToString(in.Victim_comm[:])
+ out.TriggerComm = unix.ByteSliceToString(in.Trigger_comm[:])
out.Pages = in.Pages
out.MemCgOOM = in.Memcg_oom
return
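Note on the key change above: keying oom_stats by bpf_ktime_get_ns() instead of the triggering pid means two OOM kills triggered by the same process no longer collapse into a single map entry. The flush path collects the u64 keys while iterating and deletes them afterwards; a condensed sketch of that drain pattern, assuming the GenericMap API used in this file:

// drain mirrors GetAndFlush: read every entry, then delete by key.
func drain(m *maps.GenericMap[uint64, oomStats]) []oomStats {
	var keys []uint64
	var out []oomStats
	var k uint64
	var v oomStats
	it := m.Iterate()
	for it.Next(&k, &v) {
		out = append(out, v)
		keys = append(keys, k)
	}
	// Delete only after iteration completes, so deletions never
	// invalidate the iterator mid-walk.
	for i := range keys {
		_ = m.Delete(&keys[i])
	}
	return out
}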
diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go
index 96f7b67bd043b..02e76e112b679 100644
--- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go
+++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go
@@ -92,7 +92,7 @@ func TestOOMKillProbe(t *testing.T) {
var result model.OOMKillStats
require.Eventually(t, func() bool {
for _, r := range oomKillProbe.GetAndFlush() {
- if r.TPid == uint32(cmd.Process.Pid) {
+ if r.TriggerPid == uint32(cmd.Process.Pid) {
result = r
return true
}
@@ -101,11 +101,11 @@ func TestOOMKillProbe(t *testing.T) {
}, 10*time.Second, 500*time.Millisecond, "failed to find an OOM killed process with pid %d", cmd.Process.Pid)
assert.Regexp(t, regexp.MustCompile("run-([0-9|a-z]*).scope"), result.CgroupName, "cgroup name")
- assert.Equal(t, result.TPid, result.Pid, "tpid == pid")
+ assert.Equal(t, result.TriggerPid, result.VictimPid, "trigger pid == victim pid")
assert.NotZero(t, result.Score, "score")
assert.Equal(t, int16(42), result.ScoreAdj, "score adj")
- assert.Equal(t, "dd", result.FComm, "fcomm")
- assert.Equal(t, "dd", result.TComm, "tcomm")
+ assert.Equal(t, "dd", result.VictimComm, "victim comm")
+ assert.Equal(t, "dd", result.TriggerComm, "trigger comm")
assert.NotZero(t, result.Pages, "pages")
assert.Equal(t, uint32(1), result.MemCgOOM, "memcg oom")
})
diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go
index 43967280a41ae..636a599ead969 100644
--- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go
+++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go
@@ -24,7 +24,7 @@ const (
defaultMaxAttempts = 3
defaultMaxPages = 100
defaultMaxCount = "2000"
- defaultLookback = 10 * time.Minute
+ defaultLookback = 30 * time.Minute
defaultHTTPTimeout = 10
defaultHTTPScheme = "https"
)
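Note: the lookback widening from 10 to 30 minutes is what drives every startDate change in the tests below. With the mock clock fixed at 2000-01-01T00:00:00 UTC, the window now starts 30 minutes earlier; a self-contained sketch of the arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	end := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
	start := end.Add(-30 * time.Minute)              // defaultLookback
	fmt.Println(start.Format("2006-01-02T15:04:05")) // 1999-12-31T23:30:00
}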
diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go
index 62470f296e97d..1a09f467c5174 100644
--- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go
+++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go
@@ -273,7 +273,7 @@ func TestGetInterfacesMetrics(t *testing.T) {
require.Equal(t, "2000", count)
require.Equal(t, "UTC", timeZone)
- require.Equal(t, "1999-12-31T23:50:00", startDate)
+ require.Equal(t, "1999-12-31T23:30:00", startDate)
require.Equal(t, "2000-01-01T00:00:00", endDate)
w.WriteHeader(http.StatusOK)
@@ -322,7 +322,7 @@ func TestGetDeviceHardwareMetrics(t *testing.T) {
require.Equal(t, "2000", count)
require.Equal(t, "UTC", timeZone)
- require.Equal(t, "1999-12-31T23:50:00", startDate)
+ require.Equal(t, "1999-12-31T23:30:00", startDate)
require.Equal(t, "2000-01-01T00:00:00", endDate)
w.WriteHeader(http.StatusOK)
@@ -365,7 +365,7 @@ func TestGetApplicationAwareRoutingMetrics(t *testing.T) {
require.Equal(t, "2000", count)
require.Equal(t, "UTC", timeZone)
- require.Equal(t, "1999-12-31T23:50:00", startDate)
+ require.Equal(t, "1999-12-31T23:30:00", startDate)
require.Equal(t, "2000-01-01T00:00:00", endDate)
w.WriteHeader(http.StatusOK)
@@ -552,7 +552,7 @@ func TestGetCloudExpressMetrics(t *testing.T) {
require.Equal(t, "2000", count)
require.Equal(t, "UTC", timeZone)
- require.Equal(t, "1999-12-31T23:50:00", startDate)
+ require.Equal(t, "1999-12-31T23:30:00", startDate)
require.Equal(t, "2000-01-01T00:00:00", endDate)
w.WriteHeader(http.StatusOK)
diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go
index 5efb936eb0db2..60b805d076c22 100644
--- a/pkg/collector/corechecks/networkpath/config.go
+++ b/pkg/collector/corechecks/networkpath/config.go
@@ -12,15 +12,26 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
coreconfig "github.com/DataDog/datadog-agent/pkg/config"
+ "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
"gopkg.in/yaml.v2"
)
-const defaultCheckInterval time.Duration = 1 * time.Minute
+const (
+ defaultCheckInterval time.Duration = 1 * time.Minute
+)
+
+// Number is a type constraint used to make the firstNonZero
+// helper below generic over the config's numeric types.
+type Number interface {
+ ~int | ~int64 | ~uint8
+}
// InitConfig is used to deserialize integration init config
type InitConfig struct {
- MinCollectionInterval int `yaml:"min_collection_interval"`
+ MinCollectionInterval int64 `yaml:"min_collection_interval"`
+ TimeoutMs int64 `yaml:"timeout"`
+ MaxTTL uint8 `yaml:"max_ttl"`
}
// InstanceConfig is used to deserialize integration instance config
@@ -36,7 +47,7 @@ type InstanceConfig struct {
MaxTTL uint8 `yaml:"max_ttl"`
- TimeoutMs uint `yaml:"timeout"` // millisecond
+ TimeoutMs int64 `yaml:"timeout"`
MinCollectionInterval int `yaml:"min_collection_interval"`
@@ -52,7 +63,7 @@ type CheckConfig struct {
DestinationService string
MaxTTL uint8
Protocol payload.Protocol
- TimeoutMs uint
+ Timeout time.Duration
MinCollectionInterval time.Duration
Tags []string
Namespace string
@@ -79,8 +90,6 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data
c.DestPort = instance.DestPort
c.SourceService = instance.SourceService
c.DestinationService = instance.DestinationService
- c.MaxTTL = instance.MaxTTL
- c.TimeoutMs = instance.TimeoutMs
c.Protocol = payload.Protocol(strings.ToUpper(instance.Protocol))
c.MinCollectionInterval = firstNonZero(
@@ -92,13 +101,28 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data
return nil, fmt.Errorf("min collection interval must be > 0")
}
+ c.Timeout = firstNonZero(
+ time.Duration(instance.TimeoutMs)*time.Millisecond,
+ time.Duration(initConfig.TimeoutMs)*time.Millisecond,
+ setup.DefaultNetworkPathTimeout*time.Millisecond,
+ )
+ if c.Timeout <= 0 {
+ return nil, fmt.Errorf("timeout must be > 0")
+ }
+
+ c.MaxTTL = firstNonZero(
+ instance.MaxTTL,
+ initConfig.MaxTTL,
+ setup.DefaultNetworkPathMaxTTL,
+ )
+
c.Tags = instance.Tags
c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace")
return c, nil
}
-func firstNonZero(values ...time.Duration) time.Duration {
+func firstNonZero[T Number](values ...T) T {
for _, value := range values {
if value != 0 {
return value
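Note on the generic rewrite: because Number includes ~int64, time.Duration (underlying type int64) satisfies the constraint, which is what lets the Timeout chain above pass durations directly while the MaxTTL chain passes uint8 values. A self-contained sketch (the zero-value tail is an assumption, since the function body is truncated in this hunk):

package main

import (
	"fmt"
	"time"
)

type Number interface {
	~int | ~int64 | ~uint8
}

func firstNonZero[T Number](values ...T) T {
	for _, value := range values {
		if value != 0 {
			return value
		}
	}
	var zero T
	return zero // assumed fallback when every input is zero
}

func main() {
	fmt.Println(firstNonZero(0*time.Millisecond, 70000*time.Millisecond)) // 1m10s
	fmt.Println(firstNonZero(uint8(0), uint8(50), uint8(64)))             // 50
}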
diff --git a/pkg/collector/corechecks/networkpath/config_test.go b/pkg/collector/corechecks/networkpath/config_test.go
index 6d058f2c5f0ec..bc1999876a46d 100644
--- a/pkg/collector/corechecks/networkpath/config_test.go
+++ b/pkg/collector/corechecks/networkpath/config_test.go
@@ -11,6 +11,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
coreconfig "github.com/DataDog/datadog-agent/pkg/config"
+ "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
"github.com/stretchr/testify/assert"
)
@@ -34,6 +35,8 @@ hostname: 1.2.3.4
DestHostname: "1.2.3.4",
MinCollectionInterval: time.Duration(60) * time.Second,
Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -68,6 +71,8 @@ min_collection_interval: 10
DestHostname: "1.2.3.4",
MinCollectionInterval: time.Duration(42) * time.Second,
Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -82,6 +87,8 @@ min_collection_interval: 10
DestHostname: "1.2.3.4",
MinCollectionInterval: time.Duration(10) * time.Second,
Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -93,6 +100,8 @@ hostname: 1.2.3.4
DestHostname: "1.2.3.4",
MinCollectionInterval: time.Duration(1) * time.Minute,
Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -109,6 +118,8 @@ destination_service: service-b
DestinationService: "service-b",
MinCollectionInterval: time.Duration(60) * time.Second,
Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -123,6 +134,8 @@ protocol: udp
MinCollectionInterval: time.Duration(60) * time.Second,
Namespace: "my-namespace",
Protocol: payload.ProtocolUDP,
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -137,6 +150,8 @@ protocol: UDP
MinCollectionInterval: time.Duration(60) * time.Second,
Namespace: "my-namespace",
Protocol: payload.ProtocolUDP,
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
},
},
{
@@ -151,6 +166,147 @@ protocol: TCP
MinCollectionInterval: time.Duration(60) * time.Second,
Namespace: "my-namespace",
Protocol: payload.ProtocolTCP,
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
+ },
+ },
+ {
+ name: "timeout from instance config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+timeout: 50000
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: 50000 * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
+ },
+ },
+ {
+ name: "timeout from instance config preferred over init config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+timeout: 50000
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+timeout: 70000
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: 50000 * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
+ },
+ },
+ {
+ name: "timeout from init config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+timeout: 70000
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: 70000 * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
+ },
+ },
+ {
+ name: "default timeout",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: setup.DefaultNetworkPathMaxTTL,
+ },
+ },
+ {
+ name: "negative timeout returns an error",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+timeout: -1
+`),
+ expectedError: "timeout must be > 0",
+ },
+ {
+ name: "maxTTL from instance config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+max_ttl: 50
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: 50,
+ },
+ },
+ {
+ name: "maxTTL from instance config preferred over init config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+max_ttl: 50
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+max_ttl: 64
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: 50,
+ },
+ },
+ {
+ name: "maxTTL from init config",
+ rawInstance: []byte(`
+hostname: 1.2.3.4
+min_collection_interval: 42
+`),
+ rawInitConfig: []byte(`
+min_collection_interval: 10
+max_ttl: 64
+`),
+ expectedConfig: &CheckConfig{
+ DestHostname: "1.2.3.4",
+ MinCollectionInterval: time.Duration(42) * time.Second,
+ Namespace: "my-namespace",
+ Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond,
+ MaxTTL: 64,
},
},
}
diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go
index b1e9b735e1eab..e7ba9465f268b 100644
--- a/pkg/collector/corechecks/networkpath/networkpath.go
+++ b/pkg/collector/corechecks/networkpath/networkpath.go
@@ -54,7 +54,7 @@ func (c *Check) Run() error {
DestHostname: c.config.DestHostname,
DestPort: c.config.DestPort,
MaxTTL: c.config.MaxTTL,
- TimeoutMs: c.config.TimeoutMs,
+ Timeout: c.config.Timeout,
Protocol: c.config.Protocol,
}
diff --git a/pkg/collector/corechecks/oracle/activity.go b/pkg/collector/corechecks/oracle/activity.go
index 9b3ce2fa3c134..3979e1e38eff6 100644
--- a/pkg/collector/corechecks/oracle/activity.go
+++ b/pkg/collector/corechecks/oracle/activity.go
@@ -469,6 +469,9 @@ AND status = 'ACTIVE'`)
return err
}
sendMetricWithDefaultTags(c, gauge, "dd.oracle.activity.time_ms", float64(time.Since(start).Milliseconds()))
+ TlmOracleActivityLatency.Observe(float64(time.Since(start).Milliseconds()))
+ TlmOracleActivitySamplesCount.Add(float64(len(sessionRows)))
+
sender.Commit()
return nil
diff --git a/pkg/collector/corechecks/oracle/statements.go b/pkg/collector/corechecks/oracle/statements.go
index bacc29624bea7..910cc06acdd73 100644
--- a/pkg/collector/corechecks/oracle/statements.go
+++ b/pkg/collector/corechecks/oracle/statements.go
@@ -868,8 +868,10 @@ func (c *Check) StatementMetrics() (int, error) {
sender.EventPlatformEvent(payloadBytes, "dbm-metrics")
sendMetricWithDefaultTags(c, gauge, "dd.oracle.statements_metrics.time_ms", float64(time.Since(start).Milliseconds()))
+ TlmOracleStatementMetricsLatency.Observe(float64(time.Since(start).Milliseconds()))
if c.config.ExecutionPlans.Enabled {
sendMetricWithDefaultTags(c, gauge, "dd.oracle.plan_errors.count", float64(planErrors))
+ TlmOracleStatementMetricsErrorCount.Add(float64(planErrors))
}
sender.Commit()
diff --git a/pkg/collector/corechecks/oracle/telemetry.go b/pkg/collector/corechecks/oracle/telemetry.go
new file mode 100644
index 0000000000000..34be6ef378d6b
--- /dev/null
+++ b/pkg/collector/corechecks/oracle/telemetry.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build oracle
+
+package oracle
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+)
+
+// These collectors gather telemetry data for cross-org analysis.
+// They are not expected to appear in the originating org's metrics.
+var (
+ // TlmOracleActivityLatency is the time for the activity gathering to complete
+ TlmOracleActivityLatency = telemetry.NewHistogram("oracle", "activity_latency", nil, "Histogram of activity query latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000})
+ // TlmOracleActivitySamplesCount is the number of activity samples collected
+ TlmOracleActivitySamplesCount = telemetry.NewCounter("oracle", "activity_samples_count", nil, "Number of activity samples collected")
+ // TlmOracleStatementMetricsLatency is the time for the statement metrics gathering to complete
+ TlmOracleStatementMetricsLatency = telemetry.NewHistogram("oracle", "statement_metrics", nil, "Histogram of statement metrics latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000})
+ // TlmOracleStatementMetricsErrorCount is the number of statement plan errors
+ TlmOracleStatementMetricsErrorCount = telemetry.NewCounter("oracle", "statement_plan_errors", nil, "Number of statement plan errors")
+)
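Note: these Tlm* collectors are the counterparts of the Observe/Add calls added to activity.go and statements.go above. A condensed sketch of the instrumentation pattern, as a hypothetical helper in the same package:

// recordActivityTelemetry mirrors the calls added in activity.go:
// observe the elapsed time in ms and count the collected samples.
func recordActivityTelemetry(start time.Time, sampleCount int) {
	TlmOracleActivityLatency.Observe(float64(time.Since(start).Milliseconds()))
	TlmOracleActivitySamplesCount.Add(float64(sampleCount))
}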
diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go
index 8a5419676d0f3..289e6fc42ee96 100644
--- a/pkg/collector/corechecks/servicediscovery/apm/detect.go
+++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go
@@ -118,12 +118,16 @@ func goDetector(pid int, _ []string, _ map[string]string, _ usm.DetectorContextM
}
defer elfFile.Close()
- _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength)
- if err != nil {
- return None
+ if _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil {
+ return Provided
}
- return Provided
+ // We failed to find symbols in the regular symbol table, so try the pclntab next.
+ if _, err = bininspect.GetAnySymbolWithPrefixPCLNTAB(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil {
+ return Provided
+ }
+ return None
}
func pythonDetectorFromMapsReader(reader io.Reader) Instrumentation {
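Note on the pclntab fallback: stripping a Go binary (for example with -ldflags="-s -w") removes the ELF symbol table and DWARF, but the pclntab survives because the runtime needs it for stack traces, so dd-trace-go function names remain discoverable there. A hedged sketch of the resulting two-stage lookup (the *elf.File parameter type is an assumption):

// detectDDTraceGo reports whether a dd-trace-go symbol is present,
// checking the symbol table first and falling back to the pclntab.
func detectDDTraceGo(elfFile *elf.File) bool {
	if _, err := bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil {
		return true
	}
	_, err := bininspect.GetAnySymbolWithPrefixPCLNTAB(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength)
	return err == nil
}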
diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go
index f2977cd14f998..b2e551d63fb5a 100644
--- a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go
+++ b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go
@@ -176,18 +176,29 @@ func Test_pythonDetector(t *testing.T) {
func TestGoDetector(t *testing.T) {
curDir, err := testutil.CurDir()
require.NoError(t, err)
- serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented")
+ serverBinWithSymbols, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented")
+ require.NoError(t, err)
+ serverBinWithoutSymbols, err := usmtestutil.BuildGoBinaryWrapperWithoutSymbols(filepath.Join(curDir, "testutil"), "instrumented")
require.NoError(t, err)
- cmd := exec.Command(serverBin)
- require.NoError(t, cmd.Start())
+ cmdWithSymbols := exec.Command(serverBinWithSymbols)
+ require.NoError(t, cmdWithSymbols.Start())
t.Cleanup(func() {
- _ = cmd.Process.Kill()
+ _ = cmdWithSymbols.Process.Kill()
+ })
+
+ cmdWithoutSymbols := exec.Command(serverBinWithoutSymbols)
+ require.NoError(t, cmdWithoutSymbols.Start())
+ t.Cleanup(func() {
+ _ = cmdWithoutSymbols.Process.Kill()
})
result := goDetector(os.Getpid(), nil, nil, nil)
- require.Equal(t, result, None)
+ require.Equal(t, None, result)
+
+ result = goDetector(cmdWithSymbols.Process.Pid, nil, nil, nil)
+ require.Equal(t, Provided, result)
- result = goDetector(cmd.Process.Pid, nil, nil, nil)
- require.Equal(t, result, Provided)
+ result = goDetector(cmdWithoutSymbols.Process.Pid, nil, nil, nil)
+ require.Equal(t, Provided, result)
}
diff --git a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go
index ab82025a4488a..74b92599e66cb 100644
--- a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go
+++ b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go
@@ -8,9 +8,12 @@
package main
import (
+ "time"
+
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)
func main() {
tracer.Start()
+ time.Sleep(time.Second * 20)
}
diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go
index fe22df5e15326..aa02df577a7e9 100644
--- a/pkg/collector/corechecks/servicediscovery/events.go
+++ b/pkg/collector/corechecks/servicediscovery/events.go
@@ -26,20 +26,23 @@ const (
)
type eventPayload struct {
- NamingSchemaVersion string `json:"naming_schema_version"`
- ServiceName string `json:"service_name"`
- HostName string `json:"host_name"`
- Env string `json:"env"`
- ServiceLanguage string `json:"service_language"`
- ServiceType string `json:"service_type"`
- StartTime int64 `json:"start_time"`
- LastSeen int64 `json:"last_seen"`
- APMInstrumentation string `json:"apm_instrumentation"`
- ServiceNameSource string `json:"service_name_source"`
- Ports []uint16 `json:"ports"`
- PID int `json:"pid"`
- CommandLine []string `json:"command_line"`
- RSSMemory uint64 `json:"rss_memory"`
+ NamingSchemaVersion string `json:"naming_schema_version"`
+ ServiceName string `json:"service_name"`
+ GeneratedServiceName string `json:"generated_service_name"`
+ DDService string `json:"dd_service,omitempty"`
+ HostName string `json:"host_name"`
+ Env string `json:"env"`
+ ServiceLanguage string `json:"service_language"`
+ ServiceType string `json:"service_type"`
+ StartTime int64 `json:"start_time"`
+ LastSeen int64 `json:"last_seen"`
+ APMInstrumentation string `json:"apm_instrumentation"`
+ ServiceNameSource string `json:"service_name_source,omitempty"`
+ Ports []uint16 `json:"ports"`
+ PID int `json:"pid"`
+ CommandLine []string `json:"command_line"`
+ RSSMemory uint64 `json:"rss_memory"`
+ CPUCores float64 `json:"cpu_cores"`
}
type event struct {
@@ -57,24 +60,35 @@ func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event {
host := ts.hostname.GetSafe(context.Background())
env := pkgconfig.Datadog().GetString("env")
+ nameSource := ""
+ if svc.service.DDService != "" {
+ nameSource = "provided"
+ if svc.service.DDServiceInjected {
+ nameSource = "injected"
+ }
+ }
+
return &event{
RequestType: t,
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: svc.meta.Name,
- HostName: host,
- Env: env,
- ServiceLanguage: svc.meta.Language,
- ServiceType: svc.meta.Type,
- StartTime: int64(svc.service.StartTimeSecs),
- LastSeen: svc.LastHeartbeat.Unix(),
- APMInstrumentation: svc.meta.APMInstrumentation,
- ServiceNameSource: svc.meta.NameSource,
- Ports: svc.service.Ports,
- PID: svc.service.PID,
- CommandLine: svc.service.CommandLine,
- RSSMemory: svc.service.RSS,
+ NamingSchemaVersion: "1",
+ ServiceName: svc.meta.Name,
+ GeneratedServiceName: svc.service.GeneratedName,
+ DDService: svc.service.DDService,
+ HostName: host,
+ Env: env,
+ ServiceLanguage: svc.meta.Language,
+ ServiceType: svc.meta.Type,
+ StartTime: int64(svc.service.StartTimeSecs),
+ LastSeen: svc.LastHeartbeat.Unix(),
+ APMInstrumentation: svc.meta.APMInstrumentation,
+ ServiceNameSource: nameSource,
+ Ports: svc.service.Ports,
+ PID: svc.service.PID,
+ CommandLine: svc.service.CommandLine,
+ RSSMemory: svc.service.RSS,
+ CPUCores: svc.service.CPUCores,
},
}
}
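Note: the nameSource derivation above is what the test fixtures below rely on ("injected" whenever DDServiceInjected is set, "provided" otherwise, empty when there is no DDService). A self-contained restatement:

// nameSource mirrors the logic in newEvent.
func nameSource(ddService string, injected bool) string {
	if ddService == "" {
		return ""
	}
	if injected {
		return "injected"
	}
	return "provided"
}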
diff --git a/pkg/collector/corechecks/servicediscovery/events_test.go b/pkg/collector/corechecks/servicediscovery/events_test.go
index 747d49e5dcbf2..0675a7dedd4c9 100644
--- a/pkg/collector/corechecks/servicediscovery/events_test.go
+++ b/pkg/collector/corechecks/servicediscovery/events_test.go
@@ -56,18 +56,21 @@ func Test_telemetrySender(t *testing.T) {
svc := serviceInfo{
service: model.Service{
- PID: 99,
- CommandLine: []string{"test-service", "--args"},
- Ports: []uint16{80, 8080},
- StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()),
- RSS: 500 * 1024 * 1024,
+ PID: 99,
+ CommandLine: []string{"test-service", "--args"},
+ Ports: []uint16{80, 8080},
+ StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()),
+ RSS: 500 * 1024 * 1024,
+ GeneratedName: "generated-name",
+ DDService: "dd-service",
+ DDServiceInjected: true,
+ CPUCores: 1.5,
},
meta: ServiceMetadata{
Name: "test-service",
Language: "jvm",
Type: "web_service",
APMInstrumentation: "injected",
- NameSource: "generated",
},
LastHeartbeat: now,
}
@@ -81,60 +84,69 @@ func Test_telemetrySender(t *testing.T) {
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "generated",
- Ports: []uint16{80, 8080},
- PID: 99,
- CommandLine: []string{"test-service", "--args"},
- RSSMemory: 500 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name",
+ DDService: "dd-service",
+ ServiceNameSource: "injected",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ Ports: []uint16{80, 8080},
+ PID: 99,
+ CommandLine: []string{"test-service", "--args"},
+ RSSMemory: 500 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "generated",
- Ports: []uint16{80, 8080},
- PID: 99,
- CommandLine: []string{"test-service", "--args"},
- RSSMemory: 500 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name",
+ DDService: "dd-service",
+ ServiceNameSource: "injected",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ Ports: []uint16{80, 8080},
+ PID: 99,
+ CommandLine: []string{"test-service", "--args"},
+ RSSMemory: 500 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "end-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "generated",
- Ports: []uint16{80, 8080},
- PID: 99,
- CommandLine: []string{"test-service", "--args"},
- RSSMemory: 500 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name",
+ DDService: "dd-service",
+ ServiceNameSource: "injected",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ Ports: []uint16{80, 8080},
+ PID: 99,
+ CommandLine: []string{"test-service", "--args"},
+ RSSMemory: 500 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
}
@@ -168,13 +180,14 @@ func Test_telemetrySender_name_provided(t *testing.T) {
PID: 55,
CommandLine: []string{"foo", "--option"},
StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()),
+ GeneratedName: "generated-name2",
+ DDService: "dd-service-provided",
},
meta: ServiceMetadata{
Name: "test-service",
Language: "jvm",
Type: "web_service",
APMInstrumentation: "injected",
- NameSource: "provided",
},
LastHeartbeat: now,
}
@@ -188,54 +201,60 @@ func Test_telemetrySender_name_provided(t *testing.T) {
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "provided",
- PID: 55,
- CommandLine: []string{"foo", "--option"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name2",
+ DDService: "dd-service-provided",
+ ServiceNameSource: "provided",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ PID: 55,
+ CommandLine: []string{"foo", "--option"},
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "provided",
- PID: 55,
- CommandLine: []string{"foo", "--option"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name2",
+ DDService: "dd-service-provided",
+ ServiceNameSource: "provided",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ PID: 55,
+ CommandLine: []string{"foo", "--option"},
},
},
{
RequestType: "end-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service",
- HostName: "test-host",
- Env: "",
- ServiceLanguage: "jvm",
- ServiceType: "web_service",
- StartTime: 1715557200,
- LastSeen: 1715558400,
- APMInstrumentation: "injected",
- ServiceNameSource: "provided",
- PID: 55,
- CommandLine: []string{"foo", "--option"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service",
+ GeneratedServiceName: "generated-name2",
+ DDService: "dd-service-provided",
+ ServiceNameSource: "provided",
+ HostName: "test-host",
+ Env: "",
+ ServiceLanguage: "jvm",
+ ServiceType: "web_service",
+ StartTime: 1715557200,
+ LastSeen: 1715558400,
+ APMInstrumentation: "injected",
+ PID: 55,
+ CommandLine: []string{"foo", "--option"},
},
},
}
diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go
index db48fbfe6de20..27ef6aa36cc66 100644
--- a/pkg/collector/corechecks/servicediscovery/impl_linux.go
+++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go
@@ -78,6 +78,7 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) {
if service, ok := serviceMap[pid]; ok {
svc.LastHeartbeat = now
svc.service.RSS = service.RSS
+ svc.service.CPUCores = service.CPUCores
li.aliveServices[pid] = svc
events.start = append(events.start, *svc)
}
@@ -112,6 +113,7 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) {
} else if now.Sub(svc.LastHeartbeat).Truncate(time.Minute) >= heartbeatTime {
svc.LastHeartbeat = now
svc.service.RSS = service.RSS
+ svc.service.CPUCores = service.CPUCores
events.heartbeat = append(events.heartbeat, *svc)
}
}
@@ -137,14 +139,13 @@ func (li *linuxImpl) getServiceInfo(service model.Service) serviceInfo {
// for now, docker-proxy is going on the ignore list
- serviceType := servicetype.Detect(service.Name, service.Ports)
+ serviceType := servicetype.Detect(service.Ports)
meta := ServiceMetadata{
Name: service.Name,
Language: service.Language,
Type: string(serviceType),
APMInstrumentation: service.APMInstrumentation,
- NameSource: service.NameSource,
}
return serviceInfo{
diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go
index 563dabbbd8dd4..b8ea7760d724d 100644
--- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go
+++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go
@@ -70,41 +70,50 @@ var (
portTCP8080 = model.Service{
PID: procTestService1.pid,
Name: "test-service-1",
+ GeneratedName: "test-service-1-generated",
+ DDService: "test-service-1",
+ DDServiceInjected: true,
Ports: []uint16{8080},
APMInstrumentation: string(apm.None),
- NameSource: "provided",
RSS: 100 * 1024 * 1024,
+ CPUCores: 1.5,
CommandLine: []string{"test-service-1"},
StartTimeSecs: procLaunchedSeconds,
}
portTCP8080UpdatedRSS = model.Service{
PID: procTestService1.pid,
- Name: "test-service-1",
+ GeneratedName: "test-service-1-generated",
+ DDService: "test-service-1",
+ DDServiceInjected: true,
Ports: []uint16{8080},
APMInstrumentation: string(apm.None),
- NameSource: "provided",
RSS: 200 * 1024 * 1024,
+ CPUCores: 1.5,
CommandLine: []string{"test-service-1"},
StartTimeSecs: procLaunchedSeconds,
}
portTCP8080DifferentPID = model.Service{
PID: procTestService1DifferentPID.pid,
Name: "test-service-1",
+ GeneratedName: "test-service-1-generated",
+ DDService: "test-service-1",
+ DDServiceInjected: true,
Ports: []uint16{8080},
APMInstrumentation: string(apm.Injected),
- NameSource: "generated",
CommandLine: []string{"test-service-1"},
StartTimeSecs: procLaunchedSeconds,
}
portTCP8081 = model.Service{
PID: procIgnoreService1.pid,
Name: "ignore-1",
+ GeneratedName: "ignore-1",
Ports: []uint16{8081},
StartTimeSecs: procLaunchedSeconds,
}
portTCP5000 = model.Service{
PID: procPythonService.pid,
Name: "python-service",
+ GeneratedName: "python-service",
Language: "python",
Ports: []uint16{5000},
CommandLine: pythonCommandLine,
@@ -113,6 +122,7 @@ var (
portTCP5432 = model.Service{
PID: procTestService1Repeat.pid,
Name: "test-service-1",
+ GeneratedName: "test-service-1",
Ports: []uint16{5432},
CommandLine: []string{"test-service-1"},
StartTimeSecs: procLaunchedSeconds,
@@ -206,91 +216,102 @@ func Test_linuxImpl(t *testing.T) {
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(1 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 100 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(1 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 100 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 200 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 200 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "end-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 200 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 200 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "python-service",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(1 * time.Minute).Unix(),
- Ports: []uint16{5000},
- PID: 500,
- ServiceLanguage: "python",
- CommandLine: pythonCommandLine,
+ NamingSchemaVersion: "1",
+ ServiceName: "python-service",
+ GeneratedServiceName: "python-service",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(1 * time.Minute).Unix(),
+ Ports: []uint16{5000},
+ PID: 500,
+ ServiceLanguage: "python",
+ CommandLine: pythonCommandLine,
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "python-service",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{5000},
- PID: 500,
- ServiceLanguage: "python",
- CommandLine: pythonCommandLine,
+ NamingSchemaVersion: "1",
+ ServiceName: "python-service",
+ GeneratedServiceName: "python-service",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{5000},
+ PID: 500,
+ ServiceLanguage: "python",
+ CommandLine: pythonCommandLine,
},
},
},
@@ -334,86 +355,95 @@ func Test_linuxImpl(t *testing.T) {
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "db",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(1 * time.Minute).Unix(),
- Ports: []uint16{5432},
- PID: 101,
- CommandLine: []string{"test-service-1"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1",
+ ServiceType: "db",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(1 * time.Minute).Unix(),
+ Ports: []uint16{5432},
+ PID: 101,
+ CommandLine: []string{"test-service-1"},
},
},
{
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(1 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 100 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(1 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 100 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "db",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{5432},
- PID: 101,
- CommandLine: []string{"test-service-1"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1",
+ ServiceType: "db",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{5432},
+ PID: 101,
+ CommandLine: []string{"test-service-1"},
},
},
{
RequestType: "end-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "db",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{5432},
- PID: 101,
- CommandLine: []string{"test-service-1"},
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1",
+ ServiceType: "db",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{5432},
+ PID: 101,
+ CommandLine: []string{"test-service-1"},
},
},
{
RequestType: "heartbeat-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(20 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 100 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(20 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 100 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
},
@@ -455,37 +485,42 @@ func Test_linuxImpl(t *testing.T) {
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(1 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 99,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "none",
- ServiceNameSource: "provided",
- RSSMemory: 100 * 1024 * 1024,
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(1 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 99,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "none",
+ RSSMemory: 100 * 1024 * 1024,
+ CPUCores: 1.5,
},
},
{
RequestType: "start-service",
APIVersion: "v2",
Payload: &eventPayload{
- NamingSchemaVersion: "1",
- ServiceName: "test-service-1",
- ServiceType: "web_service",
- HostName: host,
- Env: "",
- StartTime: calcTime(0).Unix(),
- LastSeen: calcTime(22 * time.Minute).Unix(),
- Ports: []uint16{8080},
- PID: 102,
- CommandLine: []string{"test-service-1"},
- APMInstrumentation: "injected",
- ServiceNameSource: "generated",
+ NamingSchemaVersion: "1",
+ ServiceName: "test-service-1",
+ GeneratedServiceName: "test-service-1-generated",
+ DDService: "test-service-1",
+ ServiceNameSource: "injected",
+ ServiceType: "web_service",
+ HostName: host,
+ Env: "",
+ StartTime: calcTime(0).Unix(),
+ LastSeen: calcTime(22 * time.Minute).Unix(),
+ Ports: []uint16{8080},
+ PID: 102,
+ CommandLine: []string{"test-service-1"},
+ APMInstrumentation: "injected",
},
},
},
diff --git a/pkg/collector/corechecks/servicediscovery/language/language.go b/pkg/collector/corechecks/servicediscovery/language/language.go
index dd4b224f9bc47..fdb16329d58d5 100644
--- a/pkg/collector/corechecks/servicediscovery/language/language.go
+++ b/pkg/collector/corechecks/servicediscovery/language/language.go
@@ -3,20 +3,9 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux
-
// Package language provides functionality to detect the programming language for a given process.
package language
-import (
- "path/filepath"
-
- "github.com/DataDog/datadog-agent/pkg/languagedetection"
- "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels"
- "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged"
- "github.com/DataDog/datadog-agent/pkg/process/procutil"
-)
-
// Language represents programming languages.
type Language string
@@ -40,67 +29,3 @@ const (
// PHP represents PHP.
PHP Language = "php"
)
-
-var (
- // languageNameToLanguageMap translates the constants rom the
- // languagedetection package to the constants used in this file. The latter
- // are shared with the backend, and at least java/jvm differs in the name
- // from the languagedetection package.
- languageNameToLanguageMap = map[languagemodels.LanguageName]Language{
- languagemodels.Go: Go,
- languagemodels.Node: Node,
- languagemodels.Dotnet: DotNet,
- languagemodels.Python: Python,
- languagemodels.Java: Java,
- languagemodels.Ruby: Ruby,
- }
-)
-
-// ProcessInfo holds information about a process.
-type ProcessInfo struct {
- Args []string
- Envs map[string]string
-}
-
-// FindInArgs tries to detect the language only using the provided command line arguments.
-func FindInArgs(exe string, args []string) Language {
- // empty slice passed in
- if len(args) == 0 {
- return ""
- }
-
- langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{
- // Pid doesn't matter since sysprobeConfig is nil
- Pid: 0,
- Cmdline: args,
- Comm: filepath.Base(exe),
- }}, nil)
- if len(langs) == 0 {
- return ""
- }
-
- lang := langs[0]
- if lang == nil {
- return ""
- }
- if outLang, ok := languageNameToLanguageMap[lang.Name]; ok {
- return outLang
- }
-
- return ""
-}
-
-// FindUsingPrivilegedDetector tries to detect the language using the provided command line arguments
-func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language {
- langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}})
- if len(langs) == 0 {
- return ""
- }
-
- lang := langs[0]
- if outLang, ok := languageNameToLanguageMap[lang.Name]; ok {
- return outLang
- }
-
- return ""
-}
diff --git a/pkg/collector/corechecks/servicediscovery/language/language_linux.go b/pkg/collector/corechecks/servicediscovery/language/language_linux.go
new file mode 100644
index 0000000000000..4149070faf06d
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/language/language_linux.go
@@ -0,0 +1,79 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux
+
+package language
+
+import (
+ "path/filepath"
+
+ "github.com/DataDog/datadog-agent/pkg/languagedetection"
+ "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels"
+ "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged"
+ "github.com/DataDog/datadog-agent/pkg/process/procutil"
+)
+
+// languageNameToLanguageMap translates the constants from the
+// languagedetection package to the constants used in this file. The latter
+// are shared with the backend, and at least java/jvm differs in the name
+// from the languagedetection package.
+var languageNameToLanguageMap = map[languagemodels.LanguageName]Language{
+ languagemodels.Go: Go,
+ languagemodels.Node: Node,
+ languagemodels.Dotnet: DotNet,
+ languagemodels.Python: Python,
+ languagemodels.Java: Java,
+ languagemodels.Ruby: Ruby,
+}
+
+// ProcessInfo holds information about a process.
+type ProcessInfo struct {
+ Args []string
+ Envs map[string]string
+}
+
+// FindInArgs tries to detect the language only using the provided command line arguments.
+func FindInArgs(exe string, args []string) Language {
+ // empty slice passed in
+ if len(args) == 0 {
+ return ""
+ }
+
+ langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{
+ // Pid doesn't matter since sysprobeConfig is nil
+ Pid: 0,
+ Cmdline: args,
+ Comm: filepath.Base(exe),
+ }}, nil)
+ if len(langs) == 0 {
+ return ""
+ }
+
+ lang := langs[0]
+ if lang == nil {
+ return ""
+ }
+ if outLang, ok := languageNameToLanguageMap[lang.Name]; ok {
+ return outLang
+ }
+
+ return ""
+}
+
+// FindUsingPrivilegedDetector tries to detect the language of the process
+// with the given PID using the privileged language detector.
+func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language {
+ langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}})
+ if len(langs) == 0 {
+ return ""
+ }
+
+ lang := langs[0]
+ if outLang, ok := languageNameToLanguageMap[lang.Name]; ok {
+ return outLang
+ }
+
+ return ""
+}
diff --git a/pkg/collector/corechecks/servicediscovery/model/model.go b/pkg/collector/corechecks/servicediscovery/model/model.go
index 2c563860fcfed..93a3585167214 100644
--- a/pkg/collector/corechecks/servicediscovery/model/model.go
+++ b/pkg/collector/corechecks/servicediscovery/model/model.go
@@ -10,13 +10,16 @@ package model
type Service struct {
PID int `json:"pid"`
Name string `json:"name"`
- NameSource string `json:"name_source"`
+ GeneratedName string `json:"generated_name"`
+ DDService string `json:"dd_service"`
+ DDServiceInjected bool `json:"dd_service_injected"`
Ports []uint16 `json:"ports"`
APMInstrumentation string `json:"apm_instrumentation"`
Language string `json:"language"`
RSS uint64 `json:"rss"`
CommandLine []string `json:"cmdline"`
StartTimeSecs uint64 `json:"start_time"`
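+	// CPUCores is the average CPU usage since the last check, expressed as a
+	// number of cores (a float between 0 and the number of CPUs).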
+ CPUCores float64 `json:"cpu_cores"`
}
// ServicesResponse is the response for the system-probe /discovery/services endpoint.
diff --git a/pkg/collector/corechecks/servicediscovery/module/envs.go b/pkg/collector/corechecks/servicediscovery/module/envs.go
index 08f5c54994565..5ae6ebf797e96 100644
--- a/pkg/collector/corechecks/servicediscovery/module/envs.go
+++ b/pkg/collector/corechecks/servicediscovery/module/envs.go
@@ -15,24 +15,40 @@ import (
"strings"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/shirou/gopsutil/v3/process"
)
const (
- // injectorMemFdName is the name the injector (Datadog/auto_inject) uses.
- injectorMemFdName = "dd_environ"
- injectorMemFdPath = "/memfd:" + injectorMemFdName + " (deleted)"
+ injectorMemFdName = "dd_process_inject_info.msgpack"
+ injectorMemFdPath = "/memfd:" + injectorMemFdName
// memFdMaxSize is used to limit the amount of data we read from the memfd.
// This is for safety to limit our memory usage in the case of a corrupt
// file.
- memFdMaxSize = 4096
+ // matches limit in the [auto injector](https://github.com/DataDog/auto_inject/blob/5ae819d01d8625c24dcf45b8fef32a7d94927d13/librouter.c#L52)
+ memFdMaxSize = 65536
)
-// readEnvsFile reads the env file created by the auto injector. The file
-// contains the variables in a format similar to /proc/$PID/environ: ENV=VAL,
-// separated by \000.
-func readEnvsFile(path string) ([]string, error) {
+// getInjectionMeta gets metadata from auto injector injection, if
+// present. The auto injector creates a memfd file where it writes
+// injection metadata such as injected environment variables, or versions
+// of the auto injector and the library.
+func getInjectionMeta(proc *process.Process) (*InjectedProcess, bool) {
+ path, found := findInjectorFile(proc)
+ if !found {
+ return nil, false
+ }
+ injectionMeta, err := extractInjectionMeta(path)
+ if err != nil {
+ log.Warnf("failed extracting injected envs: %s", err)
+ return nil, false
+ }
+ return injectionMeta, true
+}
+
+func extractInjectionMeta(path string) (*InjectedProcess, error) {
reader, err := os.Open(path)
if err != nil {
return nil, err
@@ -43,13 +59,19 @@ func readEnvsFile(path string) ([]string, error) {
if err != nil {
return nil, err
}
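+	// If the read filled the entire limit buffer, the file was likely
+	// truncated; treat it as corrupt rather than decoding partial data.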
+ if len(data) == memFdMaxSize {
+ return nil, io.ErrShortBuffer
+ }
- return strings.Split(string(data), "\000"), nil
+ var injectedProc InjectedProcess
+ if _, err = injectedProc.UnmarshalMsg(data); err != nil {
+ return nil, err
+ }
+ return &injectedProc, nil
}
-// getInjectedEnvs gets environment variables injected by the auto injector, if
-// present. The auto injector creates a memfd file with a specific name into which
-// it writes the environment variables. In order to find the correct file, we
+// findInjectorFile searches for the injector file in the process open file descriptors.
+// In order to find the correct file, we
// need to iterate the list of files (named after file descriptor numbers) in
// /proc/$PID/fd and get the name from the target of the symbolic link.
//
@@ -59,57 +81,75 @@ func readEnvsFile(path string) ([]string, error) {
// lrwx------ 1 foo foo 64 Aug 13 14:24 0 -> /dev/pts/6
// lrwx------ 1 foo foo 64 Aug 13 14:24 1 -> /dev/pts/6
// lrwx------ 1 foo foo 64 Aug 13 14:24 2 -> /dev/pts/6
-// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/memfd:dd_environ (deleted)'
+// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/memfd:dd_process_inject_info.msgpack (deleted)'
// ```
-func getInjectedEnvs(proc *process.Process) []string {
+func findInjectorFile(proc *process.Process) (string, bool) {
fdsPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "fd")
- entries, err := os.ReadDir(fdsPath)
+	// Quick path: the injector file is usually fd 3, the first file opened by
+	// the process, unless there are inherited fds.
+ path := filepath.Join(fdsPath, "3")
+ if isInjectorFile(path) {
+ return path, true
+ }
+ fdDir, err := os.Open(fdsPath)
if err != nil {
- return nil
+ log.Warnf("failed to open %s: %s", fdsPath, err)
+ return "", false
}
-
- for _, entry := range entries {
- path := filepath.Join(fdsPath, entry.Name())
- name, err := os.Readlink(path)
- if err != nil {
- continue
- }
-
- if name != injectorMemFdPath {
+ defer fdDir.Close()
+ fds, err := fdDir.Readdirnames(-1)
+ if err != nil {
+ log.Warnf("failed to read %s: %s", fdsPath, err)
+ return "", false
+ }
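+	// Skip the standard descriptors and fd 3, which the quick path above
+	// already checked.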
+ for _, fd := range fds {
+ switch fd {
+ case "0", "1", "2", "3":
continue
+ default:
+ path := filepath.Join(fdsPath, fd)
+ if isInjectorFile(path) {
+ return path, true
+ }
}
-
- envs, _ := readEnvsFile(path)
- return envs
}
+ return "", false
+}
- return nil
+func isInjectorFile(path string) bool {
+ name, err := os.Readlink(path)
+ if err != nil {
+ return false
+ }
+ return strings.HasPrefix(name, injectorMemFdPath)
}
-// envsToMap splits a list of strings containing environment variables of the
+// addEnvToMap splits a list of strings containing environment variables of the
// format NAME=VAL to a map.
-func envsToMap(envs ...string) map[string]string {
- envMap := make(map[string]string, len(envs))
- for _, env := range envs {
- name, val, found := strings.Cut(env, "=")
- if !found {
- continue
- }
-
- envMap[name] = val
+func addEnvToMap(env string, envs map[string]string) {
+ name, val, found := strings.Cut(env, "=")
+ if found {
+ envs[name] = val
}
-
- return envMap
}
// getEnvs gets the environment variables for the process, both the initial
// ones, and if present, the ones injected via the auto injector.
func getEnvs(proc *process.Process) (map[string]string, error) {
- envs, err := proc.Environ()
+ procEnvs, err := proc.Environ()
if err != nil {
return nil, err
}
-
- envs = append(envs, getInjectedEnvs(proc)...)
- return envsToMap(envs...), nil
+ envs := make(map[string]string, len(procEnvs))
+ for _, env := range procEnvs {
+ addEnvToMap(env, envs)
+ }
+ injectionMeta, ok := getInjectionMeta(proc)
+ if !ok {
+ return envs, nil
+ }
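+	// Add the injected variables last so that they override the process's own
+	// environment.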
+ for _, env := range injectionMeta.InjectedEnv {
+ addEnvToMap(string(env), envs)
+ }
+ return envs, nil
}
diff --git a/pkg/collector/corechecks/servicediscovery/module/envs_test.go b/pkg/collector/corechecks/servicediscovery/module/envs_test.go
index 7ef168b963f67..9a56755c097b6 100644
--- a/pkg/collector/corechecks/servicediscovery/module/envs_test.go
+++ b/pkg/collector/corechecks/servicediscovery/module/envs_test.go
@@ -8,7 +8,6 @@
package module
import (
- "bytes"
"fmt"
"os"
"strings"
@@ -23,8 +22,9 @@ func TestInjectedEnvBasic(t *testing.T) {
curPid := os.Getpid()
proc, err := process.NewProcess(int32(curPid))
require.NoError(t, err)
- envs := getInjectedEnvs(proc)
- require.Nil(t, envs)
+ injectionMeta, ok := getInjectionMeta(proc)
+ require.Nil(t, injectionMeta)
+ require.False(t, ok)
// Provide an injected replacement for some already-present env variable
first := os.Environ()[0]
@@ -49,12 +49,10 @@ func TestInjectedEnvLimit(t *testing.T) {
full := []string{env}
createEnvsMemfd(t, full)
- expected := []string{full[0][:memFdMaxSize]}
-
proc, err := process.NewProcess(int32(os.Getpid()))
require.NoError(t, err)
- envs := getInjectedEnvs(proc)
- require.Equal(t, expected, envs)
+ _, ok := getInjectionMeta(proc)
+ require.False(t, ok)
}
// createEnvsMemfd creates an memfd in the current process with the specified
@@ -62,16 +60,14 @@ func TestInjectedEnvLimit(t *testing.T) {
func createEnvsMemfd(t *testing.T, envs []string) {
t.Helper()
- var b bytes.Buffer
+ var injectionMeta InjectedProcess
for _, env := range envs {
- _, err := b.WriteString(env)
- require.NoError(t, err)
-
- err = b.WriteByte(0)
- require.NoError(t, err)
+ injectionMeta.InjectedEnv = append(injectionMeta.InjectedEnv, []byte(env))
}
+ encodedInjectionMeta, err := injectionMeta.MarshalMsg(nil)
+ require.NoError(t, err)
- memfd, err := memfile(injectorMemFdName, b.Bytes())
+ memfd, err := memfile(injectorMemFdName, encodedInjectionMeta)
require.NoError(t, err)
t.Cleanup(func() { unix.Close(memfd) })
}
diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go
index 690b74e5e6e7d..009b3ace69758 100644
--- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go
+++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go
@@ -7,12 +7,14 @@ package module
import (
"bufio"
+ "cmp"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
+ "slices"
"strconv"
"strings"
"sync"
@@ -28,6 +30,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm"
"github.com/DataDog/datadog-agent/pkg/languagedetection/privileged"
+ "github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/process/procutil"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
@@ -43,12 +46,14 @@ var _ module.Module = &discovery{}
// serviceInfo holds process data that should be cached between calls to the
// endpoint.
type serviceInfo struct {
- name string
- nameFromDDService bool
+ generatedName string
+ ddServiceName string
+ ddServiceInjected bool
language language.Language
apmInstrumentation apm.Instrumentation
cmdLine []string
startTimeSecs uint64
+ cpuTime uint64
}
// discovery is an implementation of the Module interface for the discovery module.
@@ -62,6 +67,10 @@ type discovery struct {
// scrubber is used to remove potentially sensitive data from the command line
scrubber *procutil.DataScrubber
+
+ // lastGlobalCPUTime stores the total cpu time of the system from the last time
+ // the endpoint was called.
+ lastGlobalCPUTime uint64
}
// NewDiscoveryModule creates a new discovery system probe module.
@@ -224,7 +233,7 @@ func parseNetIPSocketLine(fields []string, expectedState uint64) (uint64, uint16
// newNetIPSocket reads the content of the provided file and returns a map of socket inodes to ports.
// Based on newNetIPSocket() in net_ip_socket.go from github.com/prometheus/procfs
-func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error) {
+func newNetIPSocket(file string, expectedState uint64, shouldIgnore func(uint16) bool) (map[uint64]uint16, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
@@ -242,6 +251,11 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error
if err != nil {
continue
}
+
+ if shouldIgnore != nil && shouldIgnore(port) {
+ continue
+ }
+
netIPSocket[inode] = port
}
if err := s.Err(); err != nil {
@@ -254,19 +268,31 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error
// protocols for the provided namespace. Based on snapshotBoundSockets() in
// pkg/security/security_profile/activity_tree/process_node_snapshot.go.
func getNsInfo(pid int) (*namespaceInfo, error) {
- tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen)
+ // Don't ignore ephemeral ports on TCP, unlike on UDP (see below).
+ var noIgnore func(uint16) bool
+ tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen, noIgnore)
if err != nil {
log.Debugf("couldn't snapshot TCP sockets: %v", err)
}
- udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen)
+ udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen,
+ func(port uint16) bool {
+ // As in NPM (see initializePortBind() in
+ // pkg/network/tracer/connection): Ignore ephemeral port binds on
+ // UDP as they are more likely to be from clients calling bind with
+ // port 0.
+ return network.IsPortInEphemeralRange(network.AFINET, network.UDP, port) == network.EphemeralTrue
+ })
if err != nil {
log.Debugf("couldn't snapshot UDP sockets: %v", err)
}
- tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen)
+ tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen, noIgnore)
if err != nil {
log.Debugf("couldn't snapshot TCP6 sockets: %v", err)
}
- udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen)
+ udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen,
+ func(port uint16) bool {
+ return network.IsPortInEphemeralRange(network.AFINET6, network.UDP, port) == network.EphemeralTrue
+ })
if err != nil {
log.Debugf("couldn't snapshot UDP6 sockets: %v", err)
}
@@ -287,8 +313,9 @@ func getNsInfo(pid int) (*namespaceInfo, error) {
// parsingContext holds temporary context not preserved between invocations of
// the endpoint.
type parsingContext struct {
- procRoot string
- netNsInfo map[uint32]*namespaceInfo
+ procRoot string
+ netNsInfo map[uint32]*namespaceInfo
+ globalCPUTime uint64
}
// getServiceInfo gets the service information for a process using the
@@ -317,18 +344,19 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error)
contextMap := make(usm.DetectorContextMap)
root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root")
- name, fromDDService := servicediscovery.GetServiceName(cmdline, envs, root, contextMap)
lang := language.FindInArgs(exe, cmdline)
if lang == "" {
lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid)
}
+ nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, lang, contextMap)
apmInstrumentation := apm.Detect(int(proc.Pid), cmdline, envs, lang, contextMap)
return &serviceInfo{
- name: name,
+ generatedName: nameMeta.Name,
+ ddServiceName: nameMeta.DDService,
language: lang,
apmInstrumentation: apmInstrumentation,
- nameFromDDService: fromDDService,
+ ddServiceInjected: nameMeta.DDServiceInjected,
cmdLine: sanitizeCmdLine(s.scrubber, cmdline),
startTimeSecs: uint64(createTime / 1000),
}, nil
@@ -364,6 +392,10 @@ var ignoreComms = map[string]struct{}{
"docker-proxy": {},
}
+// maxNumberOfPorts is the maximum number of listening ports which we report per
+// service.
+const maxNumberOfPorts = 50
+
// getService gets information for a single service.
func (s *discovery) getService(context parsingContext, pid int32) *model.Service {
proc, err := customNewProcess(pid)
@@ -425,6 +457,16 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service
return nil
}
+ if len(ports) > maxNumberOfPorts {
+ // Sort the list so that non-ephemeral ports are given preference when
+ // we trim the list.
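+		// Ephemeral ports typically sit at the high end of the range, so an
+		// ascending sort leaves them in the tail that gets trimmed.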
+ portCmp := func(a, b uint16) int {
+ return cmp.Compare(a, b)
+ }
+ slices.SortFunc(ports, portCmp)
+ ports = ports[:maxNumberOfPorts]
+ }
+
rss, err := getRSS(proc)
if err != nil {
return nil
@@ -447,21 +489,29 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service
s.mux.Unlock()
}
- nameSource := "generated"
- if info.nameFromDDService {
- nameSource = "provided"
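+	// Prefer the name from DD_SERVICE when present; otherwise fall back to
+	// the generated name.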
+ name := info.ddServiceName
+ if name == "" {
+ name = info.generatedName
+ }
+
+ cpu, err := updateCPUCoresStats(proc, info, s.lastGlobalCPUTime, context.globalCPUTime)
+ if err != nil {
+ return nil
}
return &model.Service{
PID: int(pid),
- Name: info.name,
- NameSource: nameSource,
+ Name: name,
+ GeneratedName: info.generatedName,
+ DDService: info.ddServiceName,
+ DDServiceInjected: info.ddServiceInjected,
Ports: ports,
APMInstrumentation: string(info.apmInstrumentation),
Language: string(info.language),
RSS: rss,
CommandLine: info.cmdLine,
StartTimeSecs: info.startTimeSecs,
+ CPUCores: cpu,
}
}
@@ -488,9 +538,15 @@ func (s *discovery) getServices() (*[]model.Service, error) {
return nil, err
}
+ globalCPUTime, err := getGlobalCPUTime()
+ if err != nil {
+ return nil, err
+ }
+
context := parsingContext{
- procRoot: procRoot,
- netNsInfo: make(map[uint32]*namespaceInfo),
+ procRoot: procRoot,
+ netNsInfo: make(map[uint32]*namespaceInfo),
+ globalCPUTime: globalCPUTime,
}
var services []model.Service
@@ -508,6 +564,7 @@ func (s *discovery) getServices() (*[]model.Service, error) {
}
s.cleanCache(alivePids)
+ s.lastGlobalCPUTime = context.globalCPUTime
return &services, nil
}
diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go
index 8028da0225c70..ba93cf883c933 100644
--- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go
+++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go
@@ -20,6 +20,7 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "slices"
"strconv"
"strings"
"syscall"
@@ -44,6 +45,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model"
+ "github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil"
protocolUtils "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil"
"github.com/DataDog/datadog-agent/pkg/network/protocols/tls/nodejs"
@@ -110,8 +112,8 @@ func getServicesMap(t *testing.T, url string) map[int]model.Service {
return servicesMap
}
-func startTCPServer(t *testing.T, proto string) (*os.File, *net.TCPAddr) {
- listener, err := net.Listen(proto, "")
+func startTCPServer(t *testing.T, proto string, address string) (*os.File, *net.TCPAddr) {
+ listener, err := net.Listen(proto, address)
require.NoError(t, err)
t.Cleanup(func() { _ = listener.Close() })
tcpAddr := listener.Addr().(*net.TCPAddr)
@@ -135,8 +137,8 @@ func startTCPClient(t *testing.T, proto string, server *net.TCPAddr) (*os.File,
return f, client.LocalAddr().(*net.TCPAddr)
}
-func startUDPServer(t *testing.T, proto string) (*os.File, *net.UDPAddr) {
- lnPacket, err := net.ListenPacket(proto, "")
+func startUDPServer(t *testing.T, proto string, address string) (*os.File, *net.UDPAddr) {
+ lnPacket, err := net.ListenPacket(proto, address)
require.NoError(t, err)
t.Cleanup(func() { _ = lnPacket.Close() })
@@ -189,7 +191,7 @@ func TestBasic(t *testing.T) {
expectedPorts := make(map[int]int)
var startTCP = func(proto string) {
- f, server := startTCPServer(t, proto)
+ f, server := startTCPServer(t, proto, "")
cmd := startProcessWithFile(t, f)
expectedPIDs = append(expectedPIDs, cmd.Process.Pid)
expectedPorts[cmd.Process.Pid] = server.Port
@@ -200,7 +202,7 @@ func TestBasic(t *testing.T) {
}
var startUDP = func(proto string) {
- f, server := startUDPServer(t, proto)
+ f, server := startUDPServer(t, proto, ":8083")
cmd := startProcessWithFile(t, f)
expectedPIDs = append(expectedPIDs, cmd.Process.Pid)
expectedPorts[cmd.Process.Pid] = server.Port
@@ -241,7 +243,7 @@ func TestPorts(t *testing.T) {
var unexpectedPorts []uint16
var startTCP = func(proto string) {
- serverf, server := startTCPServer(t, proto)
+ serverf, server := startTCPServer(t, proto, "")
t.Cleanup(func() { serverf.Close() })
clientf, client := startTCPClient(t, proto, server)
t.Cleanup(func() { clientf.Close() })
@@ -251,13 +253,17 @@ func TestPorts(t *testing.T) {
}
var startUDP = func(proto string) {
- serverf, server := startUDPServer(t, proto)
+ serverf, server := startUDPServer(t, proto, ":8083")
t.Cleanup(func() { _ = serverf.Close() })
clientf, client := startUDPClient(t, proto, server)
t.Cleanup(func() { clientf.Close() })
expectedPorts = append(expectedPorts, uint16(server.Port))
unexpectedPorts = append(unexpectedPorts, uint16(client.Port))
+
+ ephemeralf, ephemeral := startUDPServer(t, proto, "")
+ t.Cleanup(func() { _ = ephemeralf.Close() })
+ unexpectedPorts = append(unexpectedPorts, uint16(ephemeral.Port))
}
startTCP("tcp4")
@@ -276,6 +282,40 @@ func TestPorts(t *testing.T) {
}
}
+func TestPortsLimits(t *testing.T) {
+ url := setupDiscoveryModule(t)
+
+ var expectedPorts []int
+
+ var openPort = func(address string) {
+ serverf, server := startTCPServer(t, "tcp4", address)
+ t.Cleanup(func() { serverf.Close() })
+
+ expectedPorts = append(expectedPorts, server.Port)
+ }
+
+ openPort("127.0.0.1:8081")
+
+ for i := 0; i < maxNumberOfPorts; i++ {
+ openPort("")
+ }
+
+ openPort("127.0.0.1:8082")
+
+ slices.Sort(expectedPorts)
+
+ serviceMap := getServicesMap(t, url)
+ pid := os.Getpid()
+ require.Contains(t, serviceMap, pid)
+ ports := serviceMap[pid].Ports
+ assert.Contains(t, ports, uint16(8081))
+ assert.Contains(t, ports, uint16(8082))
+ assert.Len(t, ports, maxNumberOfPorts)
+ for i := 0; i < maxNumberOfPorts-2; i++ {
+ assert.Contains(t, ports, uint16(expectedPorts[i]))
+ }
+}
+
func TestServiceName(t *testing.T) {
url := setupDiscoveryModule(t)
@@ -295,7 +335,7 @@ func TestServiceName(t *testing.T) {
cmd := exec.CommandContext(ctx, "sleep", "1000")
cmd.Dir = "/tmp/"
cmd.Env = append(cmd.Env, "OTHER_ENV=test")
- cmd.Env = append(cmd.Env, "DD_SERVICE=foobar")
+ cmd.Env = append(cmd.Env, "DD_SERVICE=foo😀bar")
cmd.Env = append(cmd.Env, "YET_OTHER_ENV=test")
err = cmd.Start()
require.NoError(t, err)
@@ -306,8 +346,11 @@ func TestServiceName(t *testing.T) {
require.EventuallyWithT(t, func(collect *assert.CollectT) {
portMap := getServicesMap(t, url)
assert.Contains(collect, portMap, pid)
- assert.Equal(t, "foobar", portMap[pid].Name)
- assert.Equal(t, "provided", portMap[pid].NameSource)
+			// The non-ASCII character is replaced with an underscore by normalization.
+ assert.Equal(t, "foo_bar", portMap[pid].DDService)
+ assert.Equal(t, portMap[pid].DDService, portMap[pid].Name)
+ assert.Equal(t, "sleep", portMap[pid].GeneratedName)
+ assert.False(t, portMap[pid].DDServiceInjected)
}, 30*time.Second, 100*time.Millisecond)
}
@@ -328,8 +371,13 @@ func TestInjectedServiceName(t *testing.T) {
pid := os.Getpid()
portMap := getServicesMap(t, url)
require.Contains(t, portMap, pid)
- require.Equal(t, "injected-service-name", portMap[pid].Name)
- require.Equal(t, "generated", portMap[pid].NameSource)
+ require.Equal(t, "injected-service-name", portMap[pid].DDService)
+ require.Equal(t, portMap[pid].DDService, portMap[pid].Name)
+ // The GeneratedName can vary depending on how the tests are run, so don't
+ // assert for a specific value.
+ require.NotEmpty(t, portMap[pid].GeneratedName)
+ require.NotEqual(t, portMap[pid].DDService, portMap[pid].GeneratedName)
+ assert.True(t, portMap[pid].DDServiceInjected)
}
func TestAPMInstrumentationInjected(t *testing.T) {
@@ -477,6 +525,7 @@ func TestAPMInstrumentationProvided(t *testing.T) {
assert.Equal(collect, string(test.language), portMap[pid].Language)
assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation)
assertStat(t, portMap[pid])
+ assertCPU(t, url, pid)
}, 30*time.Second, 100*time.Millisecond)
})
}
@@ -505,6 +554,23 @@ func assertStat(t assert.TestingT, svc model.Service) {
assert.Equal(t, uint64(createTimeMs/1000), svc.StartTimeSecs)
}
+func assertCPU(t *testing.T, url string, pid int) {
+ proc, err := process.NewProcess(int32(pid))
+ require.NoError(t, err, "could not create gopsutil process handle")
+
+ // Compare CPU usage measurement over an interval.
+ _ = getServicesMap(t, url)
+ referenceValue, err := proc.Percent(1 * time.Second)
+ require.NoError(t, err, "could not get gopsutil cpu usage value")
+
+	// Calling getServicesMap a second time gives us the CPU usage since the
+	// last call, which should be close to the gopsutil value.
+ portMap := getServicesMap(t, url)
+ assert.Contains(t, portMap, pid)
+ // gopsutil reports a percentage, while we are reporting a float between 0 and $(nproc),
+ // so we convert our value to a percentage.
+ assert.InDelta(t, referenceValue, portMap[pid].CPUCores*100, 10)
+}
+
func TestCommandLineSanitization(t *testing.T) {
serverDir := buildFakeServer(t)
url := setupDiscoveryModule(t)
@@ -579,9 +645,12 @@ func TestNodeDocker(t *testing.T) {
require.EventuallyWithT(t, func(collect *assert.CollectT) {
svcMap := getServicesMap(t, url)
assert.Contains(collect, svcMap, pid)
- assert.Equal(collect, "nodejs-https-server", svcMap[pid].Name)
+ // test@... changed to test_... due to normalization.
+ assert.Equal(collect, "test_nodejs-https-server", svcMap[pid].GeneratedName)
+ assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name)
assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation)
assertStat(collect, svcMap[pid])
+ assertCPU(t, url, pid)
}, 30*time.Second, 100*time.Millisecond)
}
@@ -733,7 +802,7 @@ func TestCache(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(func() { cancel() })
- f, _ := startTCPServer(t, "tcp4")
+ f, _ := startTCPServer(t, "tcp4", "")
defer f.Close()
disableCloseOnExec(t, f)
@@ -766,8 +835,8 @@ func TestCache(t *testing.T) {
for i, cmd := range cmds {
pid := int32(cmd.Process.Pid)
- require.Contains(t, discovery.cache[pid].name, serviceNames[i])
- require.True(t, discovery.cache[pid].nameFromDDService)
+ require.Equal(t, serviceNames[i], discovery.cache[pid].ddServiceName)
+ require.False(t, discovery.cache[pid].ddServiceInjected)
}
cancel()
@@ -889,12 +958,17 @@ func BenchmarkOldGetSockets(b *testing.B) {
}
 // addSockets adds only listening sockets to a map to be used for later lookups.
-func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, state uint64) {
+func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P,
+ family network.ConnectionFamily, ctype network.ConnectionType, state uint64) {
for _, sock := range sockets {
if sock.St != state {
continue
}
- sockMap[sock.Inode] = socketInfo{port: uint16(sock.LocalPort)}
+ port := uint16(sock.LocalPort)
+ if state == udpListen && network.IsPortInEphemeralRange(family, ctype, port) == network.EphemeralTrue {
+ continue
+ }
+ sockMap[sock.Inode] = socketInfo{port: port}
}
}
@@ -912,10 +986,10 @@ func getNsInfoOld(pid int) (*namespaceInfo, error) {
listeningSockets := make(map[uint64]socketInfo)
- addSockets(listeningSockets, TCP, tcpListen)
- addSockets(listeningSockets, TCP6, tcpListen)
- addSockets(listeningSockets, UDP, udpListen)
- addSockets(listeningSockets, UDP6, udpListen)
+ addSockets(listeningSockets, TCP, network.AFINET, network.TCP, tcpListen)
+ addSockets(listeningSockets, TCP6, network.AFINET6, network.TCP, tcpListen)
+ addSockets(listeningSockets, UDP, network.AFINET, network.UDP, udpListen)
+ addSockets(listeningSockets, UDP6, network.AFINET6, network.UDP, udpListen)
return &namespaceInfo{
listeningSockets: listeningSockets,
diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process.go b/pkg/collector/corechecks/servicediscovery/module/injected_process.go
new file mode 100644
index 0000000000000..5c99b0775e5ee
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/module/injected_process.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:generate go run github.com/tinylib/msgp -io=false
+
+package module
+
+// InjectedProcess represents the data injected by the auto-injector into the
+// process.
+type InjectedProcess struct {
+ LocalHostname string `msgp:"local_hostname"`
+ InjectedEnv [][]byte `msgp:"injected_envs"`
+ LanguageName string `msgp:"language_name"`
+ TracerVersion string `msgp:"tracer_version"`
+ InjectorVersion string `msgp:"injector_version"`
+}
diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go
new file mode 100644
index 0000000000000..6ecd5ef86d4c4
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go
@@ -0,0 +1,115 @@
+package module
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// MarshalMsg implements msgp.Marshaler
+func (z *InjectedProcess) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "LocalHostname"
+ o = append(o, 0x85, 0xad, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.LocalHostname)
+ // string "InjectedEnv"
+ o = append(o, 0xab, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x76)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.InjectedEnv)))
+ for za0001 := range z.InjectedEnv {
+ o = msgp.AppendBytes(o, z.InjectedEnv[za0001])
+ }
+ // string "LanguageName"
+ o = append(o, 0xac, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.LanguageName)
+ // string "TracerVersion"
+ o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = msgp.AppendString(o, z.TracerVersion)
+ // string "InjectorVersion"
+ o = append(o, 0xaf, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = msgp.AppendString(o, z.InjectorVersion)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *InjectedProcess) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "LocalHostname":
+ z.LocalHostname, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LocalHostname")
+ return
+ }
+ case "InjectedEnv":
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "InjectedEnv")
+ return
+ }
+ if cap(z.InjectedEnv) >= int(zb0002) {
+ z.InjectedEnv = (z.InjectedEnv)[:zb0002]
+ } else {
+ z.InjectedEnv = make([][]byte, zb0002)
+ }
+ for za0001 := range z.InjectedEnv {
+ z.InjectedEnv[za0001], bts, err = msgp.ReadBytesBytes(bts, z.InjectedEnv[za0001])
+ if err != nil {
+ err = msgp.WrapError(err, "InjectedEnv", za0001)
+ return
+ }
+ }
+ case "LanguageName":
+ z.LanguageName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LanguageName")
+ return
+ }
+ case "TracerVersion":
+ z.TracerVersion, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TracerVersion")
+ return
+ }
+ case "InjectorVersion":
+ z.InjectorVersion, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "InjectorVersion")
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *InjectedProcess) Msgsize() (s int) {
+ s = 1 + 14 + msgp.StringPrefixSize + len(z.LocalHostname) + 12 + msgp.ArrayHeaderSize
+ for za0001 := range z.InjectedEnv {
+ s += msgp.BytesPrefixSize + len(z.InjectedEnv[za0001])
+ }
+ s += 13 + msgp.StringPrefixSize + len(z.LanguageName) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 16 + msgp.StringPrefixSize + len(z.InjectorVersion)
+ return
+}
diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go
new file mode 100644
index 0000000000000..dbbe388c42a8b
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go
@@ -0,0 +1,67 @@
+package module
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalInjectedProcess(t *testing.T) {
+ v := InjectedProcess{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgInjectedProcess(b *testing.B) {
+ v := InjectedProcess{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgInjectedProcess(b *testing.B) {
+ v := InjectedProcess{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalInjectedProcess(b *testing.B) {
+ v := InjectedProcess{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/pkg/collector/corechecks/servicediscovery/module/stat.go b/pkg/collector/corechecks/servicediscovery/module/stat.go
index 4e12e840d741c..25077dc6d75a9 100644
--- a/pkg/collector/corechecks/servicediscovery/module/stat.go
+++ b/pkg/collector/corechecks/servicediscovery/module/stat.go
@@ -8,8 +8,11 @@
package module
import (
+ "bufio"
+ "bytes"
"errors"
"os"
+ "runtime"
"strconv"
"strings"
@@ -47,3 +50,79 @@ func getRSS(proc *process.Process) (uint64, error) {
return rssPages * pageSize, nil
}
+
+func getGlobalCPUTime() (uint64, error) {
+ globalStatPath := kernel.HostProc("stat")
+
+	// This file is very small; we only need its first line.
+ file, err := os.Open(globalStatPath)
+ if err != nil {
+ return 0, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ // Try to read the first line; it contains all the info we need.
+ if !scanner.Scan() {
+ return 0, scanner.Err()
+ }
+
+	// See proc(5) for a description of the format of /proc/stat and its fields.
+ fields := strings.Fields(scanner.Text())
+ if fields[0] != "cpu" {
+ return 0, errors.New("invalid /proc/stat file")
+ }
+
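+	// Sum all the per-mode times (user, nice, system, idle, iowait, ...) to
+	// get the total CPU time in clock ticks.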
+ var totalTime uint64
+ for _, field := range fields[1:] {
+ val, err := strconv.ParseUint(field, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ totalTime += val
+ }
+
+ return totalTime, nil
+}
+
+func updateCPUCoresStats(proc *process.Process, info *serviceInfo, lastGlobalCPUTime, currentGlobalCPUTime uint64) (float64, error) {
+ statPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "stat")
+
+ // This file is very small so just read it fully.
+ content, err := os.ReadFile(statPath)
+ if err != nil {
+ return 0, err
+ }
+
+ startIndex := bytes.LastIndexByte(content, byte(')'))
+ if startIndex == -1 || startIndex+1 >= len(content) {
+ return 0, errors.New("invalid stat format")
+ }
+
+	// See proc(5) for a description of the format of /proc/pid/stat and its fields.
+ fields := strings.Fields(string(content[startIndex+1:]))
+ if len(fields) < 50 {
+ return 0, errors.New("invalid stat format")
+ }
+
+	// Parse fields 14 and 15, the user and system CPU times respectively.
+	// See proc_pid_stat(5) for details.
+	// We index fields 11 and 12 here since the first two fields were skipped.
+ usrTime, err := strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ sysTime, err := strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+
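+	// The process delta divided by the global delta is the fraction of total
+	// machine CPU consumed; multiplying by the CPU count converts it into a
+	// number of cores.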
+ processTimeDelta := float64(usrTime + sysTime - info.cpuTime)
+ globalTimeDelta := float64(currentGlobalCPUTime - lastGlobalCPUTime)
+ cpuUsage := processTimeDelta / globalTimeDelta * float64(runtime.NumCPU())
+
+ info.cpuTime = usrTime + sysTime
+
+ return cpuUsage, nil
+}
diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go
index 529c8c92a64e7..f403ec3bbde4a 100644
--- a/pkg/collector/corechecks/servicediscovery/service_detector.go
+++ b/pkg/collector/corechecks/servicediscovery/service_detector.go
@@ -9,7 +9,9 @@ import (
"slices"
"strings"
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm"
+ "github.com/DataDog/datadog-agent/pkg/trace/traceutil"
)
// ServiceMetadata stores metadata about a service.
@@ -18,7 +20,6 @@ type ServiceMetadata struct {
Language string
Type string
APMInstrumentation string
- NameSource string
}
func fixAdditionalNames(additionalNames []string) []string {
@@ -41,10 +42,27 @@ func makeFinalName(meta usm.ServiceMetadata) string {
return name
}
+// fixupMetadata performs additional adjustments on the metadata returned from
+// the metadata extraction library.
+func fixupMetadata(meta usm.ServiceMetadata, lang language.Language) usm.ServiceMetadata {
+ meta.Name = makeFinalName(meta)
+
+ langName := ""
+ if lang != language.Unknown {
+ langName = string(lang)
+ }
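+	// Normalize the names the same way the trace pipeline does, so that the
+	// backend sees consistent service names (e.g. "foo😀bar" becomes
+	// "foo_bar").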
+ meta.Name, _ = traceutil.NormalizeService(meta.Name, langName)
+ if meta.DDService != "" {
+ meta.DDService, _ = traceutil.NormalizeService(meta.DDService, langName)
+ }
+
+ return meta
+}
+
// GetServiceName gets the service name based on the command line arguments and
// the list of environment variables.
-func GetServiceName(cmdline []string, env map[string]string, root string, contextMap usm.DetectorContextMap) (string, bool) {
+func GetServiceName(cmdline []string, env map[string]string, root string, lang language.Language, contextMap usm.DetectorContextMap) usm.ServiceMetadata {
fs := usm.NewSubDirFS(root)
- meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, contextMap)
- return makeFinalName(meta), meta.FromDDService
+ meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, lang, contextMap)
+ return fixupMetadata(meta, lang)
}
diff --git a/pkg/collector/corechecks/servicediscovery/service_detector_test.go b/pkg/collector/corechecks/servicediscovery/service_detector_test.go
new file mode 100644
index 0000000000000..5f250161237c4
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/service_detector_test.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package servicediscovery
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language"
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm"
+)
+
+func TestFixup(t *testing.T) {
+ meta := fixupMetadata(usm.ServiceMetadata{Name: "fOo", DDService: "BAR"}, language.Go)
+ assert.Equal(t, meta.Name, "foo")
+ assert.Equal(t, meta.DDService, "bar")
+
+ meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Go)
+ assert.Equal(t, meta.Name, "unnamed-go-service")
+ assert.Equal(t, meta.DDService, "")
+
+ meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Unknown)
+ assert.Equal(t, meta.Name, "unnamed-service")
+ assert.Equal(t, meta.DDService, "")
+
+ meta = fixupMetadata(usm.ServiceMetadata{Name: "foo", AdditionalNames: []string{"bar", "baz"}}, language.Go)
+ assert.Equal(t, meta.Name, "foo-bar-baz")
+}
diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go
index 3e8bf7eb7dab6..d395f28599b6b 100644
--- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go
+++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go
@@ -58,9 +58,7 @@ type osImpl interface {
DiscoverServices() (*discoveredServices, error)
}
-var (
- newOSImpl func(ignoreCfg map[string]bool) (osImpl, error)
-)
+var newOSImpl func(ignoreCfg map[string]bool) (osImpl, error)
type config struct {
IgnoreProcesses []string `yaml:"ignore_processes"`
@@ -103,9 +101,6 @@ func newCheck() check.Check {
// Configure parses the check configuration and initializes the check
func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instanceConfig, initConfig integration.Data, source string) error {
- if !pkgconfig.SystemProbe().GetBool("discovery.enabled") {
- return errors.New("service discovery is disabled")
- }
if newOSImpl == nil {
return errors.New("service_discovery check not implemented on " + runtime.GOOS)
}
@@ -137,6 +132,10 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance
// Run executes the check.
func (c *Check) Run() error {
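+	// The enablement check has moved here from Configure so that a disabled
+	// check is a silent no-op instead of a configuration error.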
+ if !pkgconfig.SystemProbe().GetBool("discovery.enabled") {
+ return nil
+ }
+
start := time.Now()
defer func() {
diff := time.Since(start).Seconds()
diff --git a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go
index 30b2b9f597053..a55781aecc01b 100644
--- a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go
+++ b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go
@@ -1,7 +1,7 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
// Package servicetype provides functionality to detect the service type for a given process.
package servicetype
@@ -84,15 +84,10 @@ var (
80: FrontEnd,
443: FrontEnd,
}
-
- // for now, this is unpopulated, but
- // as we find common service names that are listening on a
- // commonly used port, we can add them here
- nameMap = map[string]ServiceType{}
)
// Detect returns the ServiceType from the provided process information.
-func Detect(name string, ports []uint16) ServiceType {
+func Detect(ports []uint16) ServiceType {
// start with ports
for _, v := range ports {
if st, ok := portMap[v]; ok {
@@ -100,10 +95,6 @@ func Detect(name string, ports []uint16) ServiceType {
}
}
- // next check name
- if st, ok := nameMap[name]; ok {
- return st
- }
// anything else is a webservice
return WebService
diff --git a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go
index edbff8dd73934..e895bea26c9bf 100644
--- a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go
+++ b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go
@@ -1,7 +1,7 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
package servicetype_test
@@ -14,44 +14,38 @@ import (
func TestDetect(t *testing.T) {
data := []struct {
name string
- serviceName string
ports []uint16
serviceType servicetype.ServiceType
}{
{
name: "redis",
- serviceName: "redis",
ports: []uint16{9443},
serviceType: servicetype.DB,
},
{
name: "mongo",
- serviceName: "mongo",
ports: []uint16{27017, 27018, 27019, 27020},
serviceType: servicetype.DB,
},
{
name: "elastic",
- serviceName: "elastic",
ports: []uint16{9200},
serviceType: servicetype.Storage,
},
{
name: "web",
- serviceName: "apache",
ports: []uint16{80},
serviceType: servicetype.FrontEnd,
},
{
name: "internal",
- serviceName: "myService",
ports: []uint16{8080},
serviceType: servicetype.WebService,
},
}
for _, d := range data {
t.Run(d.name, func(t *testing.T) {
- serviceType := servicetype.Detect(d.serviceName, d.ports)
+ serviceType := servicetype.Detect(d.ports)
if serviceType != d.serviceType {
t.Errorf("expected %v, got %v", d.serviceType, serviceType)
}
diff --git a/pkg/collector/corechecks/servicediscovery/usm/java.go b/pkg/collector/corechecks/servicediscovery/usm/java.go
index 7c017301620e2..6512c30c04029 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/java.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/java.go
@@ -18,12 +18,10 @@ func newJavaDetector(ctx DetectionContext) detector {
return &javaDetector{ctx: ctx}
}
-func (jd javaDetector) detect(args []string) (ServiceMetadata, bool) {
+func (jd javaDetector) detect(args []string) (metadata ServiceMetadata, success bool) {
// Look for dd.service
if index := slices.IndexFunc(args, func(arg string) bool { return strings.HasPrefix(arg, "-Ddd.service=") }); index != -1 {
- metadata := NewServiceMetadata(strings.TrimPrefix(args[index], "-Ddd.service="))
- metadata.FromDDService = true
- return metadata, true
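+		// Unlike before, don't return early here: dd.service provides the DD
+		// service name, but we still want to generate a name from the jar or
+		// class below.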
+ metadata.DDService = strings.TrimPrefix(args[index], "-Ddd.service=")
}
prevArgIsFlag := false
var additionalNames []string
@@ -45,30 +43,40 @@ func (jd javaDetector) detect(args []string) (ServiceMetadata, bool) {
// try to see if the application is a spring boot archive and extract its application name
if len(additionalNames) == 0 {
if springAppName, ok := newSpringBootParser(jd.ctx).GetSpringBootAppName(a); ok {
- return NewServiceMetadata(springAppName), true
+ success = true
+ metadata.Name = springAppName
+ return
}
}
- return NewServiceMetadata(arg[:len(arg)-len(javaJarExtension)], additionalNames...), true
+ success = true
+ metadata.SetNames(arg[:len(arg)-len(javaJarExtension)], additionalNames...)
+ return
}
if strings.HasPrefix(arg, javaApachePrefix) {
// take the project name after the package 'org.apache.' while stripping off the remaining package
// and class name
arg = arg[len(javaApachePrefix):]
if idx := strings.Index(arg, "."); idx != -1 {
- return NewServiceMetadata(arg[:idx], additionalNames...), true
+ success = true
+ metadata.SetNames(arg[:idx], additionalNames...)
+ return
}
}
if idx := strings.LastIndex(arg, "."); idx != -1 && idx+1 < len(arg) {
// take just the class name without the package
- return NewServiceMetadata(arg[idx+1:], additionalNames...), true
+ success = true
+ metadata.SetNames(arg[idx+1:], additionalNames...)
+ return
}
- return NewServiceMetadata(arg, additionalNames...), true
+ success = true
+ metadata.SetNames(arg, additionalNames...)
+ return
}
}
prevArgIsFlag = hasFlagPrefix && !includesAssignment && a != javaJarFlag
}
- return ServiceMetadata{}, false
+ return
}
diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go
index f8bed0ad59541..36ddc7e804d35 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/service.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/service.go
@@ -17,6 +17,8 @@ import (
"slices"
"strings"
"unicode"
+
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language"
)
type detectorCreatorFn func(ctx DetectionContext) detector
@@ -44,9 +46,10 @@ const (
// ServiceMetadata holds information about a service.
type ServiceMetadata struct {
- Name string
- AdditionalNames []string
- FromDDService bool
+ Name string
+ AdditionalNames []string
+ DDService string
+ DDServiceInjected bool
// for future usage: we can detect also the type, vendor, frameworks, etc
}
@@ -59,6 +62,21 @@ func NewServiceMetadata(name string, additional ...string) ServiceMetadata {
return ServiceMetadata{Name: name, AdditionalNames: additional}
}
+// SetAdditionalNames set additional names for the service
+func (s *ServiceMetadata) SetAdditionalNames(additional ...string) {
+ if len(additional) > 1 {
+		// Names are discovered in an unpredictable order, so keep them sorted
+		// to make joining them deterministic.
+ slices.Sort(additional)
+ }
+ s.AdditionalNames = additional
+}
+
+// SetNames sets generated names for the service.
+func (s *ServiceMetadata) SetNames(name string, additional ...string) {
+ s.Name = name
+ s.SetAdditionalNames(additional...)
+}
+
// GetServiceKey returns the key for the service.
func (s ServiceMetadata) GetServiceKey() string {
if len(s.AdditionalNames) > 0 {
@@ -149,38 +167,37 @@ func SizeVerifiedReader(file fs.File) (io.Reader, error) {
return io.LimitReader(file, min(size, maxParseFileSize)), nil
}
-// List of binaries that usually have additional process context of what's running
-var binsWithContext = map[string]detectorCreatorFn{
- "python": newPythonDetector,
- "python2.7": newPythonDetector,
- "python3": newPythonDetector,
- "python3.7": newPythonDetector,
- "ruby2.3": newSimpleDetector,
- "ruby": newSimpleDetector,
- "java": newJavaDetector,
- "sudo": newSimpleDetector,
- "node": newNodeDetector,
- "dotnet": newDotnetDetector,
- "php": newPhpDetector,
- "gunicorn": newGunicornDetector,
-}
-
-func checkForInjectionNaming(envs map[string]string) bool {
- fromDDService := true
+// Map languages to their context detectors
+var languageDetectors = map[language.Language]detectorCreatorFn{
+ language.Python: newPythonDetector,
+ language.Ruby: newSimpleDetector,
+ language.Java: newJavaDetector,
+ language.Node: newNodeDetector,
+ language.DotNet: newDotnetDetector,
+ language.PHP: newPhpDetector,
+}
+
+// Map executables that usually have additional process context of what's
+// running, to context detectors
+var executableDetectors = map[string]detectorCreatorFn{
+ "sudo": newSimpleDetector,
+ "gunicorn": newGunicornDetector,
+}
+
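+// serviceNameInjected reports whether DD_SERVICE was likely set by the
+// injector rather than by the user; the injector indicates this by including
+// "service_name" in DD_INJECTION_ENABLED.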
+func serviceNameInjected(envs map[string]string) bool {
if env, ok := envs["DD_INJECTION_ENABLED"]; ok {
values := strings.Split(env, ",")
for _, v := range values {
if v == "service_name" {
- fromDDService = false
- break
+ return true
}
}
}
- return fromDDService
+ return false
}
// ExtractServiceMetadata attempts to detect ServiceMetadata from the given process.
-func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, contextMap DetectorContextMap) (ServiceMetadata, bool) {
+func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, lang language.Language, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) {
dc := DetectionContext{
args: args,
envs: envs,
@@ -189,14 +206,15 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS,
}
cmd := dc.args
if len(cmd) == 0 || len(cmd[0]) == 0 {
- return ServiceMetadata{}, false
+ return
}
+ // We always return a service name from here on
+ success = true
+
if value, ok := chooseServiceNameFromEnvs(dc.envs); ok {
- metadata := NewServiceMetadata(value)
- // we only want to set FromDDService to true if the name wasn't assigned by injection
- metadata.FromDDService = checkForInjectionNaming(dc.envs)
- return metadata, true
+ metadata.DDService = value
+ metadata.DDServiceInjected = serviceNameInjected(envs)
}
exe := cmd[0]
@@ -219,9 +237,25 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS,
exe = normalizeExeName(exe)
- if detectorProvider, ok := binsWithContext[exe]; ok {
- if metadata, ok := detectorProvider(dc).detect(cmd[1:]); ok {
- return metadata, true
+ detectorProvider, ok := executableDetectors[exe]
+ if !ok {
+ detectorProvider, ok = languageDetectors[lang]
+ }
+
+ if ok {
+ langMeta, ok := detectorProvider(dc).detect(cmd[1:])
+
+ // The detector could return a DD Service name (e.g. Java, from the
+ // dd.service property) but still fail to generate a service name
+ // (ok == false), so check this first.
+ if langMeta.DDService != "" {
+ metadata.DDService = langMeta.DDService
+ }
+
+ if ok {
+ metadata.Name = langMeta.Name
+ metadata.SetAdditionalNames(langMeta.AdditionalNames...)
+ return
}
}
@@ -230,7 +264,8 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS,
exe = exe[:i]
}
- return NewServiceMetadata(exe), true
+ metadata.Name = exe
+ return
}
func removeFilePath(s string) string {
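For illustration, a consumer of the new split between the configured and the generated name might resolve the final service name as below. This is a sketch only: the helper is hypothetical and not part of this patch; only ServiceMetadata and its fields come from the change above.

package main

import (
	"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm"
)

// resolveServiceName prefers an explicitly configured service name
// (DD_SERVICE, DD_TAGS, or the dd.service property) over the generated one.
func resolveServiceName(meta usm.ServiceMetadata) string {
	if meta.DDService != "" {
		return meta.DDService
	}
	return meta.Name
}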
diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go
index a0403966e141b..d3e4f2df2b6a3 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go
@@ -18,6 +18,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language"
"github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil"
)
@@ -50,121 +51,131 @@ func TestExtractServiceMetadata(t *testing.T) {
name string
cmdline []string
envs map[string]string
- expectedServiceTag string
+ lang language.Language
+ expectedGeneratedName string
+ expectedDDService string
expectedAdditionalServices []string
- fromDDService bool
+ ddServiceInjected bool
fs *SubDirFS
skipOnWindows bool
}{
{
- name: "empty",
- cmdline: []string{},
- expectedServiceTag: "",
+ name: "empty",
+ cmdline: []string{},
+ expectedGeneratedName: "",
},
{
- name: "blank",
- cmdline: []string{""},
- expectedServiceTag: "",
+ name: "blank",
+ cmdline: []string{""},
+ expectedGeneratedName: "",
},
{
name: "single arg executable",
cmdline: []string{
"./my-server.sh",
},
- expectedServiceTag: "my-server",
+ expectedGeneratedName: "my-server",
},
{
name: "single arg executable with DD_SERVICE",
cmdline: []string{
"./my-server.sh",
},
- envs: map[string]string{"DD_SERVICE": "my-service"},
- expectedServiceTag: "my-service",
- fromDDService: true,
+ envs: map[string]string{"DD_SERVICE": "my-service"},
+ expectedDDService: "my-service",
+ expectedGeneratedName: "my-server",
},
{
name: "single arg executable with DD_TAGS",
cmdline: []string{
"./my-server.sh",
},
- envs: map[string]string{"DD_TAGS": "service:my-service"},
- expectedServiceTag: "my-service",
- fromDDService: true,
+ envs: map[string]string{"DD_TAGS": "service:my-service"},
+ expectedDDService: "my-service",
+ expectedGeneratedName: "my-server",
},
{
name: "single arg executable with special chars",
cmdline: []string{
"./-my-server.sh-",
},
- expectedServiceTag: "my-server",
+ expectedGeneratedName: "my-server",
},
{
name: "sudo",
cmdline: []string{
"sudo", "-E", "-u", "dog", "/usr/local/bin/myApp", "-items=0,1,2,3", "-foo=bar",
},
- expectedServiceTag: "myApp",
+ expectedGeneratedName: "myApp",
},
{
name: "python flask argument",
cmdline: []string{
"/opt/python/2.7.11/bin/python2.7", "flask", "run", "--host=0.0.0.0",
},
- expectedServiceTag: "flask",
- envs: map[string]string{"PWD": "testdata/python"},
- fs: &subUsmTestData,
+ lang: language.Python,
+ expectedGeneratedName: "flask",
+ envs: map[string]string{"PWD": "testdata/python"},
+ fs: &subUsmTestData,
},
{
name: "python - flask argument in path",
cmdline: []string{
"/opt/python/2.7.11/bin/python2.7", "testdata/python/flask", "run", "--host=0.0.0.0", "--without-threads",
},
- expectedServiceTag: "flask",
- fs: &subUsmTestData,
+ lang: language.Python,
+ expectedGeneratedName: "flask",
+ fs: &subUsmTestData,
},
{
name: "python flask in single argument",
cmdline: []string{
"/opt/python/2.7.11/bin/python2.7 flask run --host=0.0.0.0",
},
- envs: map[string]string{"PWD": "testdata/python"},
- expectedServiceTag: "flask",
- fs: &subUsmTestData,
+ lang: language.Python,
+ envs: map[string]string{"PWD": "testdata/python"},
+ expectedGeneratedName: "flask",
+ fs: &subUsmTestData,
},
{
name: "python - module hello",
cmdline: []string{
"python3", "-m", "hello",
},
- expectedServiceTag: "hello",
+ lang: language.Python,
+ expectedGeneratedName: "hello",
},
{
name: "ruby - td-agent",
cmdline: []string{
"ruby", "/usr/sbin/td-agent", "--log", "/var/log/td-agent/td-agent.log", "--daemon", "/var/run/td-agent/td-agent.pid",
},
- expectedServiceTag: "td-agent",
+ lang: language.Ruby,
+ expectedGeneratedName: "td-agent",
},
{
name: "java using the -jar flag to define the service",
cmdline: []string{
"java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "-jar", "/opt/sheepdog/bin/myservice.jar",
},
- expectedServiceTag: "myservice",
+ lang: language.Java,
+ expectedGeneratedName: "myservice",
},
{
name: "java class name as service",
cmdline: []string{
"java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "com.datadog.example.HelloWorld",
},
- expectedServiceTag: "HelloWorld",
+ lang: language.Java,
+ expectedGeneratedName: "HelloWorld",
},
{
name: "java kafka",
cmdline: []string{
"java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "kafka.Kafka",
},
- expectedServiceTag: "Kafka",
+ lang: language.Java,
+ expectedGeneratedName: "Kafka",
},
{
name: "java parsing for org.apache projects with cassandra as the service",
@@ -174,15 +185,18 @@ func TestExtractServiceMetadata(t *testing.T) {
"-cp", "/etc/cassandra:/usr/share/cassandra/lib/HdrHistogram-2.1.9.jar:/usr/share/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar",
"org.apache.cassandra.service.CassandraDaemon",
},
- expectedServiceTag: "cassandra",
+ lang: language.Java,
+ expectedGeneratedName: "cassandra",
},
{
name: "java space in java executable path",
cmdline: []string{
"/home/dd/my java dir/java", "com.dog.cat",
},
- expectedServiceTag: "cat",
- }, {
+ lang: language.Java,
+ expectedGeneratedName: "cat",
+ },
+ {
name: "node js with package.json not present",
cmdline: []string{
"/usr/bin/node",
@@ -192,7 +206,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"--",
"/somewhere/index.js",
},
- expectedServiceTag: "node",
+ lang: language.Node,
+ expectedGeneratedName: "node",
},
{
name: "node js with a broken package.json",
@@ -200,7 +215,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"/usr/bin/node",
"./testdata/inner/index.js",
},
- expectedServiceTag: "node",
+ lang: language.Node,
+ expectedGeneratedName: "node",
},
{
name: "node js with a valid package.json",
@@ -212,8 +228,9 @@ func TestExtractServiceMetadata(t *testing.T) {
"--",
"./testdata/index.js",
},
- expectedServiceTag: "my-awesome-package",
- fs: &subUsmTestData,
+ lang: language.Node,
+ expectedGeneratedName: "my-awesome-package",
+ fs: &subUsmTestData,
},
{
name: "node js with a symlink to a .js file and valid package.json",
@@ -225,9 +242,10 @@ func TestExtractServiceMetadata(t *testing.T) {
"./testdata/bins/broken",
"./testdata/bins/json-server",
},
- expectedServiceTag: "json-server-package",
- skipOnWindows: true,
- fs: &subUsmTestData,
+ lang: language.Node,
+ expectedGeneratedName: "json-server-package",
+ skipOnWindows: true,
+ fs: &subUsmTestData,
},
{
name: "node js with a valid nested package.json and cwd",
@@ -239,9 +257,10 @@ func TestExtractServiceMetadata(t *testing.T) {
"--",
"index.js",
},
- envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes
- fs: &subUsmTestData,
- expectedServiceTag: "my-awesome-package",
+ lang: language.Node,
+ envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes
+ fs: &subUsmTestData,
+ expectedGeneratedName: "my-awesome-package",
},
{
name: "spring boot default options",
@@ -250,11 +269,13 @@ func TestExtractServiceMetadata(t *testing.T) {
"-jar",
springBootAppFullPath,
},
- expectedServiceTag: "default-app",
+ lang: language.Java,
+ expectedGeneratedName: "default-app",
},
{
name: "wildfly 18 standalone",
- cmdline: []string{"home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java",
+ cmdline: []string{
+ "home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java",
"-D[Standalone]",
"-server",
"-Xms64m",
@@ -275,15 +296,18 @@ func TestExtractServiceMetadata(t *testing.T) {
"" + jbossTestAppRoot + "/modules",
"org.jboss.as.standalone",
"-Djboss.home.dir=" + jbossTestAppRoot,
- "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone"},
- expectedServiceTag: "jboss-modules",
+ "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone",
+ },
+ lang: language.Java,
+ expectedGeneratedName: "jboss-modules",
expectedAdditionalServices: []string{"my-jboss-webapp", "some_context_root", "web3"},
fs: &sub,
envs: map[string]string{"PWD": "/sibiling"},
},
{
name: "wildfly 18 domain",
- cmdline: []string{"/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java",
+ cmdline: []string{
+ "/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java",
"--add-exports=java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports=jdk.unsupported/sun.reflect=ALL-UNNAMED",
"--add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED",
@@ -307,8 +331,10 @@ func TestExtractServiceMetadata(t *testing.T) {
"" + jbossTestAppRoot + "/jboss-modules.jar",
"-mp",
"" + jbossTestAppRoot + "/modules",
- "org.jboss.as.server"},
- expectedServiceTag: "jboss-modules",
+ "org.jboss.as.server",
+ },
+ lang: language.Java,
+ expectedGeneratedName: "jboss-modules",
expectedAdditionalServices: []string{"web3", "web4"},
fs: &sub,
envs: map[string]string{"PWD": "/sibiling"},
@@ -316,7 +342,8 @@ func TestExtractServiceMetadata(t *testing.T) {
{
name: "weblogic 12",
fs: &sub,
- cmdline: []string{"/u01/jdk/bin/java",
+ cmdline: []string{
+ "/u01/jdk/bin/java",
"-Djava.security.egd=file:/dev/./urandom",
"-cp",
"/u01/oracle/wlserver/server/lib/weblogic-launcher.jar",
@@ -328,9 +355,11 @@ func TestExtractServiceMetadata(t *testing.T) {
"-da",
"-Dwls.home=/u01/oracle/wlserver/server",
"-Dweblogic.home=/u01/oracle/wlserver/server",
- "weblogic.Server"},
+ "weblogic.Server",
+ },
+ lang: language.Java,
envs: map[string]string{"PWD": weblogicTestAppRootAbsolute},
- expectedServiceTag: "Server",
+ expectedGeneratedName: "Server",
expectedAdditionalServices: []string{"my_context", "sample4", "some_context_root"},
},
{
@@ -338,8 +367,21 @@ func TestExtractServiceMetadata(t *testing.T) {
cmdline: []string{
"/usr/bin/java", "-Ddd.service=custom", "-jar", "app.jar",
},
- expectedServiceTag: "custom",
- fromDDService: true,
+ lang: language.Java,
+ expectedDDService: "custom",
+ expectedGeneratedName: "app",
+ },
+ {
+ // The system property takes priority over the environment variable, see
+ // https://docs.datadoghq.com/tracing/trace_collection/library_config/java/
+ name: "java with dd_service as system property and DD_SERVICE",
+ cmdline: []string{
+ "/usr/bin/java", "-Ddd.service=dd-service-from-property", "-jar", "app.jar",
+ },
+ lang: language.Java,
+ envs: map[string]string{"DD_SERVICE": "dd-service-from-env"},
+ expectedDDService: "dd-service-from-property",
+ expectedGeneratedName: "app",
},
{
name: "Tomcat 10.X",
@@ -363,7 +405,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"org.apache.catalina.startup.Bootstrap",
"start",
},
- expectedServiceTag: "catalina",
+ lang: language.Java,
+ expectedGeneratedName: "catalina",
expectedAdditionalServices: []string{"app2", "custom"},
fs: &subUsmTestData,
},
@@ -372,21 +415,24 @@ func TestExtractServiceMetadata(t *testing.T) {
cmdline: []string{
"/usr/bin/dotnet", "./myservice.dll",
},
- expectedServiceTag: "myservice",
+ lang: language.DotNet,
+ expectedGeneratedName: "myservice",
},
{
name: "dotnet cmd with dll and options",
cmdline: []string{
"/usr/bin/dotnet", "-v", "--", "/app/lib/myservice.dll",
},
- expectedServiceTag: "myservice",
+ lang: language.DotNet,
+ expectedGeneratedName: "myservice",
},
{
name: "dotnet cmd with unrecognized options",
cmdline: []string{
"/usr/bin/dotnet", "run", "--project", "./projects/proj1/proj1.csproj",
},
- expectedServiceTag: "dotnet",
+ lang: language.DotNet,
+ expectedGeneratedName: "dotnet",
},
{
name: "PHP Laravel",
@@ -395,7 +441,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"artisan",
"serve",
},
- expectedServiceTag: "laravel",
+ lang: language.PHP,
+ expectedGeneratedName: "laravel",
},
{
name: "Plain PHP with INI",
@@ -404,7 +451,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"-ddatadog.service=foo",
"swoole-server.php",
},
- expectedServiceTag: "foo",
+ lang: language.PHP,
+ expectedGeneratedName: "foo",
},
{
name: "PHP with version number",
@@ -413,7 +461,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"artisan",
"migrate:fresh",
},
- expectedServiceTag: "laravel",
+ lang: language.PHP,
+ expectedGeneratedName: "laravel",
},
{
name: "PHP with two-digit version number",
@@ -422,7 +471,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"artisan",
"migrate:fresh",
},
- expectedServiceTag: "laravel",
+ lang: language.PHP,
+ expectedGeneratedName: "laravel",
},
{
name: "PHP-FPM shouldn't trigger php parsing",
@@ -430,7 +480,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"php-fpm",
"artisan",
},
- expectedServiceTag: "php-fpm",
+ expectedGeneratedName: "php-fpm",
},
{
name: "PHP-FPM with version number shouldn't trigger php parsing",
@@ -438,28 +488,32 @@ func TestExtractServiceMetadata(t *testing.T) {
"php8.1-fpm",
"artisan",
},
- expectedServiceTag: "php8",
+ expectedGeneratedName: "php8",
},
{
- name: "DD_SERVICE_set_manually",
- cmdline: []string{"java", "-jar", "Foo.jar"},
- envs: map[string]string{"DD_SERVICE": "howdy"},
- expectedServiceTag: "howdy",
- fromDDService: true,
+ name: "DD_SERVICE_set_manually",
+ cmdline: []string{"java", "-jar", "Foo.jar"},
+ lang: language.Java,
+ envs: map[string]string{"DD_SERVICE": "howdy"},
+ expectedDDService: "howdy",
+ expectedGeneratedName: "Foo",
},
{
- name: "DD_SERVICE_set_manually_tags",
- cmdline: []string{"java", "-jar", "Foo.jar"},
- envs: map[string]string{"DD_TAGS": "service:howdy"},
- expectedServiceTag: "howdy",
- fromDDService: true,
+ name: "DD_SERVICE_set_manually_tags",
+ cmdline: []string{"java", "-jar", "Foo.jar"},
+ lang: language.Java,
+ envs: map[string]string{"DD_TAGS": "service:howdy"},
+ expectedDDService: "howdy",
+ expectedGeneratedName: "Foo",
},
{
- name: "DD_SERVICE_set_manually_injection",
- cmdline: []string{"java", "-jar", "Foo.jar"},
- envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"},
- expectedServiceTag: "howdy",
- fromDDService: false,
+ name: "DD_SERVICE_set_manually_injection",
+ cmdline: []string{"java", "-jar", "Foo.jar"},
+ lang: language.Java,
+ envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"},
+ expectedDDService: "howdy",
+ expectedGeneratedName: "Foo",
+ ddServiceInjected: true,
},
{
name: "gunicorn simple",
@@ -468,7 +522,8 @@ func TestExtractServiceMetadata(t *testing.T) {
"--workers=2",
"test:app",
},
- expectedServiceTag: "test",
+ lang: language.Python,
+ expectedGeneratedName: "test",
},
{
name: "gunicorn from name",
@@ -481,7 +536,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"dummy",
"test:app",
},
- expectedServiceTag: "dummy",
+ expectedGeneratedName: "dummy",
},
{
name: "gunicorn from name (long arg)",
@@ -493,7 +548,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"--name=dummy",
"test:app",
},
- expectedServiceTag: "dummy",
+ expectedGeneratedName: "dummy",
},
{
name: "gunicorn from name in env",
@@ -501,16 +556,16 @@ func TestExtractServiceMetadata(t *testing.T) {
"gunicorn",
"test:app",
},
- envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3 -n dummy"},
- expectedServiceTag: "dummy",
+ envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3 -n dummy"},
+ expectedGeneratedName: "dummy",
},
{
name: "gunicorn without app found",
cmdline: []string{
"gunicorn",
},
- envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3"},
- expectedServiceTag: "gunicorn",
+ envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3"},
+ expectedGeneratedName: "gunicorn",
},
{
name: "gunicorn with partial wsgi app",
@@ -518,7 +573,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"gunicorn",
"my.package",
},
- expectedServiceTag: "my.package",
+ expectedGeneratedName: "my.package",
},
{
name: "gunicorn with empty WSGI_APP env",
@@ -526,16 +581,16 @@ func TestExtractServiceMetadata(t *testing.T) {
"gunicorn",
"my.package",
},
- envs: map[string]string{"WSGI_APP": ""},
- expectedServiceTag: "my.package",
+ envs: map[string]string{"WSGI_APP": ""},
+ expectedGeneratedName: "my.package",
},
{
name: "gunicorn with WSGI_APP env",
cmdline: []string{
"gunicorn",
},
- envs: map[string]string{"WSGI_APP": "test:app"},
- expectedServiceTag: "test",
+ envs: map[string]string{"WSGI_APP": "test:app"},
+ expectedGeneratedName: "test",
},
{
name: "gunicorn with replaced cmdline with colon",
@@ -544,7 +599,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"master",
"[domains.foo.apps.bar:create_server()]",
},
- expectedServiceTag: "domains.foo.apps.bar",
+ expectedGeneratedName: "domains.foo.apps.bar",
},
{
name: "gunicorn with replaced cmdline",
@@ -553,7 +608,7 @@ func TestExtractServiceMetadata(t *testing.T) {
"master",
"[mcservice]",
},
- expectedServiceTag: "mcservice",
+ expectedGeneratedName: "mcservice",
},
}
@@ -568,14 +623,15 @@ func TestExtractServiceMetadata(t *testing.T) {
if tt.fs != nil {
fs = *tt.fs
}
- meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, make(DetectorContextMap))
- if len(tt.expectedServiceTag) == 0 {
+ meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, tt.lang, make(DetectorContextMap))
+ if len(tt.expectedGeneratedName) == 0 && len(tt.expectedDDService) == 0 {
require.False(t, ok)
} else {
require.True(t, ok)
- require.Equal(t, tt.expectedServiceTag, meta.Name)
+ require.Equal(t, tt.expectedDDService, meta.DDService)
+ require.Equal(t, tt.expectedGeneratedName, meta.Name)
require.Equal(t, tt.expectedAdditionalServices, meta.AdditionalNames)
- require.Equal(t, tt.fromDDService, meta.FromDDService)
+ require.Equal(t, tt.ddServiceInjected, meta.DDServiceInjected)
}
})
}
diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go
index 1ee597fec2e25..08a03370e2b7d 100644
--- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go
+++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go
@@ -65,6 +65,7 @@ type DeviceCheck struct {
config *checkconfig.CheckConfig
sender *report.MetricSender
session session.Session
+ sessionFactory session.Factory
devicePinger pinger.Pinger
sessionCloseErrorCount *atomic.Uint64
savedDynamicTags []string
@@ -80,12 +81,8 @@ const cacheKeyPrefix = "snmp-tags"
func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFactory session.Factory) (*DeviceCheck, error) {
newConfig := config.CopyWithNewIP(ipAddress)
- sess, err := sessionFactory(newConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to configure session: %s", err)
- }
-
var devicePinger pinger.Pinger
+ var err error
if newConfig.PingEnabled {
devicePinger, err = createPinger(newConfig.PingConfig)
if err != nil {
@@ -98,7 +95,7 @@ func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFa
d := DeviceCheck{
config: newConfig,
- session: sess,
+ sessionFactory: sessionFactory,
devicePinger: devicePinger,
sessionCloseErrorCount: atomic.NewUint64(0),
nextAutodetectMetrics: timeNow(),
@@ -160,6 +157,12 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error {
startTime := time.Now()
staticTags := append(d.config.GetStaticTags(), d.config.GetNetworkTags()...)
+ var err error
+ d.session, err = d.sessionFactory(d.config)
+ if err != nil {
+ return err
+ }
+
// Fetch and report metrics
var checkErr error
var deviceStatus metadata.DeviceStatus
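The change above moves session creation out of NewDeviceCheck and into Run, via the stored sessionFactory. A minimal sketch of the resulting lifecycle, using assumed placeholder types rather than the real ones:

package main

type config struct{}

type session interface{ Close() error }

type factory func(*config) (session, error)

type deviceCheck struct {
	cfg      *config
	makeSess factory
	session  session
}

// run builds the session lazily, once per run: a transient SNMP connection
// error now surfaces on each run instead of failing check creation for good.
func (d *deviceCheck) run() error {
	s, err := d.makeSess(d.cfg)
	if err != nil {
		return err
	}
	d.session = s
	// ... fetch and report metrics using d.session ...
	return nil
}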
diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go
index f06bd065e5059..4661a9f28689b 100644
--- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go
+++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go
@@ -421,6 +421,9 @@ profiles:
deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory)
assert.Nil(t, err)
+ deviceCk.session, err = sessionFactory(config)
+ assert.Nil(t, err)
+
sender := mocksender.NewMockSender("123") // required to initiate aggregator
deviceCk.SetSender(report.NewMetricSender(sender, "", nil, report.MakeInterfaceBandwidthState()))
sess.On("GetNext", []string{"1.0"}).Return(session.CreateGetNextPacket("9999", gosnmp.EndOfMibView, nil), nil)
@@ -912,6 +915,9 @@ community_string: public
deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory)
assert.Nil(t, err)
+ deviceCk.session, err = sessionFactory(config)
+ assert.Nil(t, err)
+
sender := mocksender.NewMockSender("123") // required to initiate aggregator
sender.SetupAcceptAll()
diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml
index e01a2e747831b..979f769cfff8e 100644
--- a/pkg/config/config_template.yaml
+++ b/pkg/config/config_template.yaml
@@ -2987,6 +2987,15 @@ api_key:
#
# trace_agent_socket: unix:///var/run/datadog/apm.socket
+ ## @param type_socket_volumes - boolean - optional - default: false
+ ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TYPE_SOCKET_VOLUMES - boolean - optional - default: false
+ ## When enabled, injected volumes are of type "Socket". This means that
+ ## injected pods will not start until the Agent creates the dogstatsd and
+ ## trace-agent sockets. This ensures no lost traces or dogstatsd metrics but
+ ## can cause the pod to wait if the agent has issues creating the sockets.
+ #
+ # type_socket_volumes: false
+
## @param inject_tags - custom object - optional
## Tags injection parameters.
#
@@ -4386,22 +4395,13 @@ api_key:
## Debug-specific configuration for OTLP ingest in the Datadog Agent.
## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation
## for a full list of available settings:
- ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/loggingexporter#getting-started
+ ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter#getting-started
#
# debug:
-
- ## Deprecated (v[6/7].41.0) - use `verbosity` instead
- ## @param loglevel - string - optional - default: none
- ## @env DD_OTLP_CONFIG_DEBUG_LOGLEVEL - string - optional - default: none
- ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics.
- ## Valid values are disabled, debug, info, error, warn.
- #
- # loglevel: info
-
## @param verbosity - string - optional - default: normal
## @env DD_OTLP_CONFIG_DEBUG_VERBOSITY - string - optional - default: normal
## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics.
- ## Valid values are basic, normal, detailed.
+ ## Valid values are basic, normal, detailed, none.
#
# verbosity: normal
{{- if (eq .OS "windows")}}
diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go
index f4266c10de8f2..8b4477cdfe55f 100644
--- a/pkg/config/model/viper.go
+++ b/pkg/config/model/viper.go
@@ -161,12 +161,25 @@ func (c *safeConfig) SetDefault(key string, value interface{}) {
c.Viper.SetDefault(key, value)
}
-// UnsetForSource wraps Viper for concurrent access
+// UnsetForSource unsets a config entry for a given source
func (c *safeConfig) UnsetForSource(key string, source Source) {
+ // modify the config, then release the lock to avoid deadlocks while notifying
+ var receivers []NotificationReceiver
c.Lock()
- defer c.Unlock()
+ previousValue := c.Viper.Get(key)
c.configSources[source].Set(key, nil)
c.mergeViperInstances(key)
+ newValue := c.Viper.Get(key) // Can't use nil, so we get the newly computed value
+ if previousValue != nil {
+ // only snapshot the receivers if the key previously had a value; otherwise nothing changed and no callback should fire
+ receivers = slices.Clone(c.notificationReceivers)
+ }
+ c.Unlock()
+
+ // notify all receivers about the updated setting
+ for _, receiver := range receivers {
+ receiver(key, previousValue, newValue)
+ }
}
// mergeViperInstances is called after a change in an instance of Viper
@@ -640,6 +653,8 @@ func (c *safeConfig) MergeConfig(in io.Reader) error {
// MergeFleetPolicy merges the configuration from the reader given with an existing config
// it overrides the existing values with the new ones in the FleetPolicies source, and updates the main config
// according to sources priority order.
+//
+// Note: this should only be called at startup, as notification receivers won't be called for settings loaded this way
func (c *safeConfig) MergeFleetPolicy(configPath string) error {
c.Lock()
defer c.Unlock()
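UnsetForSource above follows the snapshot-then-notify pattern: mutate state under the lock, clone the receiver list, unlock, then invoke callbacks lock-free so a callback that re-enters the config cannot deadlock. A generic sketch of the pattern, with illustrative types:

package main

import (
	"slices"
	"sync"
)

type notifier struct {
	mu        sync.Mutex
	data      map[string]any
	receivers []func(key string, value any)
}

func (n *notifier) set(key string, value any) {
	n.mu.Lock()
	n.data[key] = value
	receivers := slices.Clone(n.receivers) // snapshot while holding the lock
	n.mu.Unlock()

	for _, r := range receivers { // notify outside the lock
		r(key, value)
	}
}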
diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go
index 83bec5ee7a66b..22209d122c8fd 100644
--- a/pkg/config/model/viper_test.go
+++ b/pkg/config/model/viper_test.go
@@ -449,3 +449,20 @@ func TestParseEnvAsSliceMapString(t *testing.T) {
t.Setenv("DD_MAP", "__some_data__")
assert.Equal(t, []map[string]string{{"a": "a", "b": "b", "c": "c"}}, config.Get("map"))
}
+
+func TestListenersUnsetForSource(t *testing.T) {
+ config := NewConfig("test", "DD", strings.NewReplacer(".", "_"))
+
+ // Create a listener that will keep track of the changes
+ logLevels := []string{}
+ config.OnUpdate(func(_ string, _, next any) {
+ nextString := next.(string)
+ logLevels = append(logLevels, nextString)
+ })
+
+ config.Set("log_level", "info", SourceFile)
+ config.Set("log_level", "debug", SourceRC)
+ config.UnsetForSource("log_level", SourceRC)
+
+ assert.Equal(t, []string{"info", "debug", "info"}, logLevels)
+}
diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go
index a97cecda5f899..124ce3de40f59 100644
--- a/pkg/config/remote/client/client.go
+++ b/pkg/config/remote/client/client.go
@@ -46,6 +46,12 @@ type ConfigFetcher interface {
ClientGetConfigs(context.Context, *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error)
}
+// Listener defines the interface of a remote config listener
+type Listener interface {
+ OnUpdate(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))
+ OnStateChange(bool)
+}
+
// fetchConfigs defines the function that an agent client uses to get config updates
type fetchConfigs func(context.Context, *pbgo.ClientGetConfigsRequest, ...grpc.CallOption) (*pbgo.ClientGetConfigsResponse, error)
@@ -69,7 +75,7 @@ type Client struct {
state *state.Repository
- listeners map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))
+ listeners map[string][]Listener
// Elements that can be changed during the execution of listeners
// They are atomics so that they don't have to share the top-level mutex
@@ -160,9 +166,6 @@ func (g *agentGRPCConfigFetcher) ClientGetConfigs(ctx context.Context, request *
return g.fetchConfigs(ctx, request)
}
-// Handler is a function that is called when a config update is received.
-type Handler func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))
-
// NewClient creates a new client
func NewClient(updater ConfigFetcher, opts ...func(o *Options)) (*Client, error) {
return newClient(updater, opts...)
@@ -289,7 +292,7 @@ func newClient(cf ConfigFetcher, opts ...func(opts *Options)) (*Client, error) {
installerState: installerState,
state: repository,
backoffPolicy: backoffPolicy,
- listeners: make(map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))),
+ listeners: make(map[string][]Listener),
configFetcher: cf,
}, nil
}
@@ -324,8 +327,8 @@ func (c *Client) SetAgentName(agentName string) {
}
}
-// Subscribe subscribes to config updates of a product.
-func (c *Client) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) {
+// SubscribeAll subscribes to all events (config updates, state changes, ...)
+func (c *Client) SubscribeAll(product string, listener Listener) {
c.m.Lock()
defer c.m.Unlock()
@@ -341,7 +344,12 @@ func (c *Client) Subscribe(product string, fn func(update map[string]state.RawCo
c.products = append(c.products, product)
}
- c.listeners[product] = append(c.listeners[product], fn)
+ c.listeners[product] = append(c.listeners[product], listener)
+}
+
+// Subscribe subscribes to config updates of a product.
+func (c *Client) Subscribe(product string, cb func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) {
+ c.SubscribeAll(product, NewUpdateListener(cb))
}
// GetConfigs returns the current configs applied of a product.
@@ -428,11 +436,29 @@ func (c *Client) pollLoop() {
log.Infof("retrying the first update of remote-config state (%v)", err)
}
} else {
+ c.m.Lock()
+ for _, productListeners := range c.listeners {
+ for _, listener := range productListeners {
+ listener.OnStateChange(false)
+ }
+ }
+ c.m.Unlock()
+
c.lastUpdateError = err
c.backoffErrorCount = c.backoffPolicy.IncError(c.backoffErrorCount)
log.Errorf("could not update remote-config state: %v", c.lastUpdateError)
}
} else {
+ if c.lastUpdateError != nil {
+ c.m.Lock()
+ for _, productListeners := range c.listeners {
+ for _, listener := range productListeners {
+ listener.OnStateChange(true)
+ }
+ }
+ c.m.Unlock()
+ }
+
c.lastUpdateError = nil
successfulFirstRun = true
c.backoffErrorCount = c.backoffPolicy.DecError(c.backoffErrorCount)
@@ -470,7 +496,7 @@ func (c *Client) update() error {
for product, productListeners := range c.listeners {
if containsProduct(changedProducts, product) {
for _, listener := range productListeners {
- listener(c.state.GetConfigs(product), c.state.UpdateApplyStatus)
+ listener.OnUpdate(c.state.GetConfigs(product), c.state.UpdateApplyStatus)
}
}
}
@@ -594,6 +620,33 @@ func (c *Client) newUpdateRequest() (*pbgo.ClientGetConfigsRequest, error) {
return req, nil
}
+type listener struct {
+ onUpdate func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))
+ onStateChange func(bool)
+}
+
+func (l *listener) OnUpdate(configs map[string]state.RawConfig, cb func(cfgPath string, status state.ApplyStatus)) {
+ if l.onUpdate != nil {
+ l.onUpdate(configs, cb)
+ }
+}
+
+func (l *listener) OnStateChange(state bool) {
+ if l.onStateChange != nil {
+ l.onStateChange(state)
+ }
+}
+
+// NewUpdateListener creates a remote config listener from an update callback
+func NewUpdateListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) Listener {
+ return &listener{onUpdate: onUpdate}
+}
+
+// NewListener creates a remote config listener from a pair of update and state-change callbacks
+func NewListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)), onStateChange func(bool)) Listener {
+ return &listener{onUpdate: onUpdate, onStateChange: onStateChange}
+}
+
var (
idSize = 21
idAlphabet = []rune("_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
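A sketch of consuming the new listener API; the product name and callback bodies are illustrative, and the state package path is assumed to be the one used elsewhere in the repository. Per the poll loop above, OnStateChange is invoked with false when polling starts failing and with true once it recovers:

package main

import (
	rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client"
	"github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
)

func subscribe(c *rcclient.Client) {
	c.SubscribeAll("MY_PRODUCT", rcclient.NewListener(
		func(updates map[string]state.RawConfig, applyStatus func(string, state.ApplyStatus)) {
			// Acknowledge every config received (illustrative handling).
			for path := range updates {
				applyStatus(path, state.ApplyStatus{State: state.ApplyStateAcknowledged})
			}
		},
		func(up bool) {
			// up == false: remote-config polling is failing; true: it recovered.
			_ = up
		},
	))
}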
diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go
index 2cf2a4bdfb4b1..9ed374b26e64b 100644
--- a/pkg/config/setup/config.go
+++ b/pkg/config/setup/config.go
@@ -78,8 +78,10 @@ const (
// DefaultCompressorKind is the default compressor. Options available are 'zlib' and 'zstd'
DefaultCompressorKind = "zlib"
- // DefaultZstdCompressionLevel should mirror the default compression level defined in https://github.com/DataDog/zstd/blob/1.x/zstd.go#L23
- DefaultZstdCompressionLevel = 5
+ // DefaultZstdCompressionLevel is the default compression level for `zstd`.
+ // Compression level 1 provides the lowest compression ratio but uses much
+ // less RSS, especially when `GOMAXPROCS` is high.
+ DefaultZstdCompressionLevel = 1
// DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor
DefaultLogsSenderBackoffFactor = 2.0
@@ -103,6 +105,12 @@ const (
// DefaultMaxMessageSizeBytes is the default value for max_message_size_bytes
// If a log message is larger than this byte limit, the overflow bytes will be truncated.
DefaultMaxMessageSizeBytes = 256 * 1000
+
+ // DefaultNetworkPathTimeout defines the default timeout (in milliseconds) for a network path test
+ DefaultNetworkPathTimeout = 1000
+
+ // DefaultNetworkPathMaxTTL defines the default maximum TTL for traceroute tests
+ DefaultNetworkPathMaxTTL = 30
)
// datadog is the global configuration object
@@ -433,6 +441,8 @@ func InitConfig(config pkgconfigmodel.Config) {
// Network Path
config.BindEnvAndSetDefault("network_path.connections_monitoring.enabled", false)
config.BindEnvAndSetDefault("network_path.collector.workers", 4)
+ config.BindEnvAndSetDefault("network_path.collector.timeout", DefaultNetworkPathTimeout)
+ config.BindEnvAndSetDefault("network_path.collector.max_ttl", DefaultNetworkPathMaxTTL)
config.BindEnvAndSetDefault("network_path.collector.input_chan_size", 1000)
config.BindEnvAndSetDefault("network_path.collector.processing_chan_size", 1000)
config.BindEnvAndSetDefault("network_path.collector.pathtest_contexts_limit", 10000)
@@ -711,6 +721,7 @@ func InitConfig(config pkgconfigmodel.Config) {
config.BindEnvAndSetDefault("admission_controller.inject_config.socket_path", "/var/run/datadog")
config.BindEnvAndSetDefault("admission_controller.inject_config.trace_agent_socket", "unix:///var/run/datadog/apm.socket")
config.BindEnvAndSetDefault("admission_controller.inject_config.dogstatsd_socket", "unix:///var/run/datadog/dsd.socket")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.type_socket_volumes", false)
config.BindEnvAndSetDefault("admission_controller.inject_tags.enabled", true)
config.BindEnvAndSetDefault("admission_controller.inject_tags.endpoint", "/injecttags")
config.BindEnvAndSetDefault("admission_controller.inject_tags.pod_owners_cache_validity", 10) // in minutes
@@ -905,7 +916,6 @@ func InitConfig(config pkgconfigmodel.Config) {
config.BindEnvAndSetDefault("runtime_security_config.socket", filepath.Join(InstallPath, "run/runtime-security.sock"))
}
config.BindEnvAndSetDefault("runtime_security_config.log_profiled_workloads", false)
- config.BindEnvAndSetDefault("runtime_security_config.telemetry.ignore_dd_agent_containers", true)
config.BindEnvAndSetDefault("runtime_security_config.use_secruntime_track", true)
bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.endpoints.")
bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.activity_dump.remote_storage.endpoints.")
diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go
index 3598e7b8cd5c7..2325bfe23afbb 100644
--- a/pkg/config/setup/config_test.go
+++ b/pkg/config/setup/config_test.go
@@ -659,6 +659,22 @@ network_devices:
assert.Equal(t, "dev", config.GetString("network_devices.namespace"))
}
+func TestNetworkPathDefaults(t *testing.T) {
+ datadogYaml := ""
+ config := confFromYAML(t, datadogYaml)
+
+ assert.Equal(t, false, config.GetBool("network_path.connections_monitoring.enabled"))
+ assert.Equal(t, 4, config.GetInt("network_path.collector.workers"))
+ assert.Equal(t, 1000, config.GetInt("network_path.collector.timeout"))
+ assert.Equal(t, 30, config.GetInt("network_path.collector.max_ttl"))
+ assert.Equal(t, 1000, config.GetInt("network_path.collector.input_chan_size"))
+ assert.Equal(t, 1000, config.GetInt("network_path.collector.processing_chan_size"))
+ assert.Equal(t, 10000, config.GetInt("network_path.collector.pathtest_contexts_limit"))
+ assert.Equal(t, 15*time.Minute, config.GetDuration("network_path.collector.pathtest_ttl"))
+ assert.Equal(t, 5*time.Minute, config.GetDuration("network_path.collector.pathtest_interval"))
+ assert.Equal(t, 10*time.Second, config.GetDuration("network_path.collector.flush_interval"))
+}
+
func TestUsePodmanLogsAndDockerPathOverride(t *testing.T) {
// If use_podman_logs is true and docker_path_override is set, the config should return an error
datadogYaml := `
diff --git a/pkg/config/setup/otlp.go b/pkg/config/setup/otlp.go
index 7ff9245d6196a..f6312342e319c 100644
--- a/pkg/config/setup/otlp.go
+++ b/pkg/config/setup/otlp.go
@@ -94,6 +94,5 @@ func setupOTLPEnvironmentVariables(config pkgconfigmodel.Setup) {
config.BindEnv(OTLPSection + ".metrics.summaries.mode")
// Debug settings
- config.BindEnv(OTLPSection + ".debug.loglevel") // Deprecated
config.BindEnv(OTLPSection + ".debug.verbosity")
}
diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go
index 64a4fc53e273e..7ebde9fda3f96 100644
--- a/pkg/config/setup/system_probe.go
+++ b/pkg/config/setup/system_probe.go
@@ -167,6 +167,10 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) {
// User Tracer
cfg.BindEnvAndSetDefault(join(diNS, "enabled"), false, "DD_DYNAMIC_INSTRUMENTATION_ENABLED")
+ cfg.BindEnvAndSetDefault(join(diNS, "offline_mode"), false, "DD_DYNAMIC_INSTRUMENTATION_OFFLINE_MODE")
+ cfg.BindEnvAndSetDefault(join(diNS, "probes_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_PROBES_FILE_PATH")
+ cfg.BindEnvAndSetDefault(join(diNS, "snapshot_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_SNAPSHOT_FILE_PATH")
+ cfg.BindEnvAndSetDefault(join(diNS, "diagnostics_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_DIAGNOSTICS_FILE_PATH")
// network_tracer settings
// we cannot use BindEnvAndSetDefault for network_config.enabled because we need to know if it was manually set.
diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go
index ffa2f283ae56d..3497ffc143007 100644
--- a/pkg/config/setup/system_probe_cws.go
+++ b/pkg/config/setup/system_probe_cws.go
@@ -133,4 +133,10 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) {
cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.raw_syscall.enabled", false)
cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.exclude_binaries", []string{})
cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.rule_source_allowed", []string{"file", "remote-config"})
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.max_allowed", 5)
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.period", "1m")
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.max_allowed", 5)
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.period", "1m")
}
diff --git a/pkg/config/structure/unmarshal.go b/pkg/config/structure/unmarshal.go
new file mode 100644
index 0000000000000..97420043e2268
--- /dev/null
+++ b/pkg/config/structure/unmarshal.go
@@ -0,0 +1,440 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package structure defines a helper to retrieve structured data from the config
+package structure
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// UnmarshalKey retrieves data from the config at the given key and deserializes it
+// to be stored on the target struct. It is implemented entirely using reflection, and
+// does not depend upon details of the data model of the config.
+// The target struct can use struct tags of "yaml", "json", or "mapstructure" to rename fields
+func UnmarshalKey(cfg model.Reader, key string, target interface{}) error {
+ source, err := newNode(reflect.ValueOf(cfg.Get(key)))
+ if err != nil {
+ return err
+ }
+ outValue := reflect.ValueOf(target)
+ if outValue.Kind() == reflect.Pointer {
+ outValue = reflect.Indirect(outValue)
+ }
+ switch outValue.Kind() {
+ case reflect.Map:
+ return copyMap(outValue, source)
+ case reflect.Struct:
+ return copyStruct(outValue, source)
+ case reflect.Slice:
+ if arr, ok := source.(arrayNode); ok {
+ return copyList(outValue, arr)
+ }
+ return fmt.Errorf("can not UnmarshalKey to a slice from a non-list source")
+ default:
+ return fmt.Errorf("can only UnmarshalKey to struct, map, or slice, got %v", outValue.Kind())
+ }
+}
+
+var errNotFound = fmt.Errorf("not found")
+
+// leafNode represents a leaf with a scalar value
+
+type leafNode interface {
+ GetBool() (bool, error)
+ GetInt() (int, error)
+ GetFloat() (float64, error)
+ GetString() (string, error)
+}
+
+type leafNodeImpl struct {
+ // val must be a scalar kind
+ val reflect.Value
+}
+
+var _ leafNode = (*leafNodeImpl)(nil)
+var _ node = (*leafNodeImpl)(nil)
+
+// arrayNode represents a node with an ordered array of children
+
+type arrayNode interface {
+ Size() int
+ Index(int) (node, error)
+}
+
+type arrayNodeImpl struct {
+ // val must be a Slice with Len() and Index()
+ val reflect.Value
+}
+
+var _ arrayNode = (*arrayNodeImpl)(nil)
+var _ node = (*arrayNodeImpl)(nil)
+
+// node represents an arbitrary node of the tree
+
+type node interface {
+ GetChild(string) (node, error)
+ ChildrenKeys() ([]string, error)
+}
+
+type innerNodeImpl struct {
+ // val must be a struct
+ val reflect.Value
+}
+
+type innerMapNodeImpl struct {
+ // val must be a map[string]interface{}
+ val reflect.Value
+}
+
+var _ node = (*innerNodeImpl)(nil)
+var _ node = (*innerMapNodeImpl)(nil)
+
+// all nodes, leaf, inner, and array nodes, each act as nodes
+func newNode(v reflect.Value) (node, error) {
+ if v.Kind() == reflect.Struct {
+ return &innerNodeImpl{val: v}, nil
+ } else if v.Kind() == reflect.Map {
+ return &innerMapNodeImpl{val: v}, nil
+ } else if v.Kind() == reflect.Slice {
+ return &arrayNodeImpl{val: v}, nil
+ } else if isScalarKind(v) {
+ return &leafNodeImpl{val: v}, nil
+ }
+ return nil, fmt.Errorf("could not create node from: %v of type %T and kind %v", v, v, v.Kind())
+}
+
+// GetChild returns the child node at the given key, or an error if not found
+func (n *innerNodeImpl) GetChild(key string) (node, error) {
+ findex := findFieldMatch(n.val, key)
+ if findex == -1 {
+ return nil, errNotFound
+ }
+ inner := n.val.Field(findex)
+ if inner.Kind() == reflect.Interface {
+ inner = inner.Elem()
+ }
+ return newNode(inner)
+}
+
+// ChildrenKeys returns the list of keys of the children of the given node, if it is a map
+func (n *innerNodeImpl) ChildrenKeys() ([]string, error) {
+ structType := n.val.Type()
+ keys := make([]string, 0, n.val.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ f := structType.Field(i)
+ ch, _ := utf8.DecodeRuneInString(f.Name)
+ if unicode.IsLower(ch) {
+ continue
+ }
+ keys = append(keys, fieldNameToKey(f))
+ }
+ return keys, nil
+}
+
+// GetChild returns the child node at the given key, or an error if not found
+func (n *innerMapNodeImpl) GetChild(key string) (node, error) {
+ inner := n.val.MapIndex(reflect.ValueOf(key))
+ if !inner.IsValid() {
+ return nil, errNotFound
+ }
+ if inner.Kind() == reflect.Interface {
+ inner = inner.Elem()
+ }
+ return newNode(inner)
+}
+
+// ChildrenKeys returns the list of keys of the children of the given node, if it is a map
+func (n *innerMapNodeImpl) ChildrenKeys() ([]string, error) {
+ mapkeys := n.val.MapKeys()
+ keys := make([]string, 0, len(mapkeys))
+ for _, kv := range mapkeys {
+ if kstr, ok := kv.Interface().(string); ok {
+ keys = append(keys, kstr)
+ } else {
+ return nil, fmt.Errorf("map node has invalid non-string key: %v", kv)
+ }
+ }
+ return keys, nil
+}
+
+// GetChild returns an error because array node does not have children accessible by name
+func (n *arrayNodeImpl) GetChild(string) (node, error) {
+ return nil, fmt.Errorf("arrayNodeImpl.GetChild not implemented")
+}
+
+// ChildrenKeys returns an error because array node does not have children accessible by name
+func (n *arrayNodeImpl) ChildrenKeys() ([]string, error) {
+ return nil, fmt.Errorf("arrayNodeImpl.ChildrenKeys not implemented")
+}
+
+// Size returns number of children in the list
+func (n *arrayNodeImpl) Size() int {
+ return n.val.Len()
+}
+
+// Index returns the kth element of the list
+func (n *arrayNodeImpl) Index(k int) (node, error) {
+ // arrayNodeImpl assumes val is an Array with Len() and Index()
+ elem := n.val.Index(k)
+ if elem.Kind() == reflect.Interface {
+ elem = elem.Elem()
+ }
+ return newNode(elem)
+}
+
+// GetChild returns an error because a leaf has no children
+func (n *leafNodeImpl) GetChild(string) (node, error) {
+ return nil, fmt.Errorf("can't GetChild of a leaf node")
+}
+
+// ChildrenKeys returns an error because a leaf has no children
+func (n *leafNodeImpl) ChildrenKeys() ([]string, error) {
+ return nil, fmt.Errorf("can't get ChildrenKeys of a leaf node")
+}
+
+// GetBool returns the scalar as a bool, or an error otherwise
+func (n *leafNodeImpl) GetBool() (bool, error) {
+ if n.val.Kind() == reflect.Bool {
+ return n.val.Bool(), nil
+ } else if n.val.Kind() == reflect.String {
+ return convertToBool(n.val.String())
+ }
+ return false, newConversionError(n.val, "bool")
+}
+
+// GetInt returns the scalar as a int, or an error otherwise
+func (n *leafNodeImpl) GetInt() (int, error) {
+ switch n.val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int(n.val.Int()), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int(n.val.Uint()), nil
+ case reflect.Float32, reflect.Float64:
+ return int(n.val.Float()), nil
+ }
+ return 0, newConversionError(n.val, "int")
+}
+
+// GetFloat returns the scalar as a float64, or an error otherwise
+func (n *leafNodeImpl) GetFloat() (float64, error) {
+ switch n.val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(n.val.Int()), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(n.val.Uint()), nil
+ case reflect.Float32, reflect.Float64:
+ return float64(n.val.Float()), nil
+ }
+ return 0, newConversionError(n.val, "float")
+}
+
+// GetString returns the scalar as a string, or an error otherwise
+func (n *leafNodeImpl) GetString() (string, error) {
+ if n.val.Kind() == reflect.String {
+ return n.val.String(), nil
+ }
+ return "", newConversionError(n.val, "string")
+}
+
+// convert a string to a bool using standard yaml constants
+func convertToBool(text string) (bool, error) {
+ lower := strings.ToLower(text)
+ if lower == "y" || lower == "yes" || lower == "on" || lower == "true" || lower == "1" {
+ return true, nil
+ } else if lower == "n" || lower == "no" || lower == "off" || lower == "false" || lower == "0" {
+ return false, nil
+ }
+ return false, newConversionError(reflect.ValueOf(text), "bool")
+}
+
+func fieldNameToKey(field reflect.StructField) string {
+ name := field.Name
+ if tagtext := field.Tag.Get("yaml"); tagtext != "" {
+ name = tagtext
+ } else if tagtext := field.Tag.Get("json"); tagtext != "" {
+ name = tagtext
+ } else if tagtext := field.Tag.Get("mapstructure"); tagtext != "" {
+ name = tagtext
+ }
+ // skip any additional specifiers such as ",omitempty"
+ if commaPos := strings.IndexRune(name, ','); commaPos != -1 {
+ name = name[:commaPos]
+ }
+ return name
+}
+
+func copyStruct(target reflect.Value, source node) error {
+ targetType := target.Type()
+ for i := 0; i < targetType.NumField(); i++ {
+ f := targetType.Field(i)
+ ch, _ := utf8.DecodeRuneInString(f.Name)
+ if unicode.IsLower(ch) {
+ continue
+ }
+ child, err := source.GetChild(fieldNameToKey(f))
+ if err == errNotFound {
+ continue
+ } else if err != nil {
+ return err
+ }
+ err = copyAny(target.FieldByName(f.Name), child)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copyMap(target reflect.Value, source node) error {
+ // TODO: Should handle maps with more complex types in a future PR
+ ktype := reflect.TypeOf("")
+ vtype := reflect.TypeOf("")
+ mtype := reflect.MapOf(ktype, vtype)
+ results := reflect.MakeMap(mtype)
+
+ mapKeys, err := source.ChildrenKeys()
+ if err != nil {
+ return err
+ }
+ for _, mkey := range mapKeys {
+ child, err := source.GetChild(mkey)
+ if err != nil {
+ return err
+ }
+ if child == nil {
+ continue
+ }
+ if scalar, ok := child.(leafNode); ok {
+ if mval, err := scalar.GetString(); err == nil {
+ results.SetMapIndex(reflect.ValueOf(mkey), reflect.ValueOf(mval))
+ } else {
+ return fmt.Errorf("TODO: only map[string]string supported currently")
+ }
+ }
+ }
+ target.Set(results)
+ return nil
+}
+
+func copyLeaf(target reflect.Value, source leafNode) error {
+ if source == nil {
+ return fmt.Errorf("source value is not a scalar")
+ }
+ switch target.Kind() {
+ case reflect.Bool:
+ v, err := source.GetBool()
+ if err != nil {
+ return err
+ }
+ target.SetBool(v)
+ return nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v, err := source.GetInt()
+ if err != nil {
+ return err
+ }
+ target.SetInt(int64(v))
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v, err := source.GetInt()
+ if err != nil {
+ return err
+ }
+ target.SetUint(uint64(v))
+ return nil
+ case reflect.Float32, reflect.Float64:
+ v, err := source.GetFloat()
+ if err != nil {
+ return err
+ }
+ target.SetFloat(float64(v))
+ return nil
+ case reflect.String:
+ v, err := source.GetString()
+ if err != nil {
+ return err
+ }
+ target.SetString(v)
+ return nil
+ }
+ return fmt.Errorf("unsupported scalar type %v", target.Kind())
+}
+
+func copyList(target reflect.Value, source arrayNode) error {
+ if source == nil {
+ return fmt.Errorf("source value is not a list")
+ }
+ elemType := target.Type()
+ elemType = elemType.Elem()
+ numElems := source.Size()
+ results := reflect.MakeSlice(reflect.SliceOf(elemType), numElems, numElems)
+ for k := 0; k < numElems; k++ {
+ elemSource, err := source.Index(k)
+ if err != nil {
+ return err
+ }
+ ptrOut := reflect.New(elemType)
+ outTarget := ptrOut.Elem()
+ err = copyAny(outTarget, elemSource)
+ if err != nil {
+ return err
+ }
+ results.Index(k).Set(outTarget)
+ }
+ target.Set(results)
+ return nil
+}
+
+func copyAny(target reflect.Value, source node) error {
+ if target.Kind() == reflect.Pointer {
+ allocPtr := reflect.New(target.Type().Elem())
+ target.Set(allocPtr)
+ target = allocPtr.Elem()
+ }
+ if isScalarKind(target) {
+ if leaf, ok := source.(leafNode); ok {
+ return copyLeaf(target, leaf)
+ }
+ return fmt.Errorf("can't copy into target: scalar required, but source is not a leaf")
+ } else if target.Kind() == reflect.Map {
+ return copyMap(target, source)
+ } else if target.Kind() == reflect.Struct {
+ return copyStruct(target, source)
+ } else if target.Kind() == reflect.Slice {
+ if arr, ok := source.(arrayNode); ok {
+ return copyList(target, arr)
+ }
+ return fmt.Errorf("can't copy into target: []T required, but source is not an array")
+ } else if target.Kind() == reflect.Invalid {
+ return fmt.Errorf("can't copy invalid value %s : %v", target, target.Kind())
+ }
+ return fmt.Errorf("unknown value to copy: %v", target.Type())
+}
+
+func isScalarKind(v reflect.Value) bool {
+ k := v.Kind()
+ return (k >= reflect.Bool && k <= reflect.Float64) || k == reflect.String
+}
+
+func findFieldMatch(val reflect.Value, key string) int {
+ schema := val.Type()
+ for i := 0; i < schema.NumField(); i++ {
+ if key == fieldNameToKey(schema.Field(i)) {
+ return i
+ }
+ }
+ return -1
+}
+
+func newConversionError(v reflect.Value, expectType string) error {
+ return fmt.Errorf("could not convert to %s: %v of type %T and Kind %v", expectType, v, v, v.Kind())
+}
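The tests that follow exercise the "yaml" tag; as a further illustration of fieldNameToKey's fallback order (yaml, then json, then mapstructure), here is a hypothetical extra test, not part of the patch, that binds fields through the other two tags:

package structure

import (
	"testing"

	"github.com/DataDog/datadog-agent/pkg/config/mock"
	"github.com/stretchr/testify/assert"
)

// Hypothetical struct: each field resolves through a non-yaml tag.
type proxySketch struct {
	HTTP    string   `mapstructure:"http"`
	NoProxy []string `json:"no_proxy"`
}

func TestUnmarshalKeyTagFallbackSketch(t *testing.T) {
	confYaml := `
proxy:
  http: http://localhost:3128
  no_proxy:
    - 169.254.169.254
`
	mockConfig := mock.NewFromYAML(t, confYaml)
	mockConfig.SetKnown("proxy")

	var p = proxySketch{}
	err := UnmarshalKey(mockConfig, "proxy", &p)
	assert.NoError(t, err)

	assert.Equal(t, p.HTTP, "http://localhost:3128")
	assert.Equal(t, p.NoProxy, []string{"169.254.169.254"})
}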
diff --git a/pkg/config/structure/unmarshal_test.go b/pkg/config/structure/unmarshal_test.go
new file mode 100644
index 0000000000000..3f8546020811f
--- /dev/null
+++ b/pkg/config/structure/unmarshal_test.go
@@ -0,0 +1,154 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package structure
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/DataDog/datadog-agent/pkg/config/mock"
+ "github.com/stretchr/testify/assert"
+)
+
+// Struct that is used within the config
+type UserV3 struct {
+ Username string `yaml:"user"`
+ UsernameLegacy string `yaml:"username"`
+ AuthKey string `yaml:"authKey"`
+ AuthProtocol string `yaml:"authProtocol"`
+ PrivKey string `yaml:"privKey"`
+ PrivProtocol string `yaml:"privProtocol"`
+}
+
+// Type that gets parsed out of config
+type TrapsConfig struct {
+ Enabled bool `yaml:"enabled"`
+ Port uint16 `yaml:"port"`
+ Users []UserV3 `yaml:"users"`
+ CommunityStrings []string `yaml:"community_strings"`
+ BindHost string `yaml:"bind_host"`
+ StopTimeout int `yaml:"stop_timeout"`
+ Namespace string `yaml:"namespace"`
+}
+
+func TestUnmarshalKeyTrapsConfig(t *testing.T) {
+ confYaml := `
+network_devices:
+ snmp_traps:
+ enabled: true
+ port: 1234
+ community_strings: ["a","b","c"]
+ users:
+ - user: alice
+ authKey: hunter2
+ authProtocol: MD5
+ privKey: pswd
+ privProtocol: AE5
+ - user: bob
+ authKey: "123456"
+ authProtocol: MD5
+ privKey: secret
+ privProtocol: AE5
+ bind_host: ok
+ stop_timeout: 4
+ namespace: abc
+`
+ mockConfig := mock.NewFromYAML(t, confYaml)
+
+ var trapsCfg = TrapsConfig{}
+ err := UnmarshalKey(mockConfig, "network_devices.snmp_traps", &trapsCfg)
+ assert.NoError(t, err)
+
+ assert.Equal(t, trapsCfg.Enabled, true)
+ assert.Equal(t, trapsCfg.Port, uint16(1234))
+ assert.Equal(t, trapsCfg.CommunityStrings, []string{"a", "b", "c"})
+
+ assert.Equal(t, len(trapsCfg.Users), 2)
+ assert.Equal(t, trapsCfg.Users[0].Username, "alice")
+ assert.Equal(t, trapsCfg.Users[0].AuthKey, "hunter2")
+ assert.Equal(t, trapsCfg.Users[0].AuthProtocol, "MD5")
+ assert.Equal(t, trapsCfg.Users[0].PrivKey, "pswd")
+ assert.Equal(t, trapsCfg.Users[0].PrivProtocol, "AE5")
+ assert.Equal(t, trapsCfg.Users[1].Username, "bob")
+ assert.Equal(t, trapsCfg.Users[1].AuthKey, "123456")
+ assert.Equal(t, trapsCfg.Users[1].AuthProtocol, "MD5")
+ assert.Equal(t, trapsCfg.Users[1].PrivKey, "secret")
+ assert.Equal(t, trapsCfg.Users[1].PrivProtocol, "AE5")
+
+ assert.Equal(t, trapsCfg.BindHost, "ok")
+ assert.Equal(t, trapsCfg.StopTimeout, 4)
+ assert.Equal(t, trapsCfg.Namespace, "abc")
+}
+
+type Endpoint struct {
+ Name string `yaml:"name"`
+ APIKey string `yaml:"apikey"`
+}
+
+func TestUnmarshalKeySliceOfStructures(t *testing.T) {
+ confYaml := `
+endpoints:
+- name: intake
+ apikey: abc1
+- name: config
+ apikey: abc2
+- name: health
+ apikey: abc3
+`
+ mockConfig := mock.NewFromYAML(t, confYaml)
+ mockConfig.SetKnown("endpoints")
+
+ var endpoints = []Endpoint{}
+ err := UnmarshalKey(mockConfig, "endpoints", &endpoints)
+ assert.NoError(t, err)
+
+ assert.Equal(t, len(endpoints), 3)
+ assert.Equal(t, endpoints[0].Name, "intake")
+ assert.Equal(t, endpoints[0].APIKey, "abc1")
+ assert.Equal(t, endpoints[1].Name, "config")
+ assert.Equal(t, endpoints[1].APIKey, "abc2")
+ assert.Equal(t, endpoints[2].Name, "health")
+ assert.Equal(t, endpoints[2].APIKey, "abc3")
+}
+
+type FeatureConfig struct {
+ Enabled bool `yaml:"enabled"`
+}
+
+func TestUnmarshalKeyParseStringAsBool(t *testing.T) {
+ confYaml := `
+feature:
+ enabled: "true"
+`
+ mockConfig := mock.NewFromYAML(t, confYaml)
+ mockConfig.SetKnown("feature")
+
+ var feature = FeatureConfig{}
+ err := UnmarshalKey(mockConfig, "feature", &feature)
+ assert.NoError(t, err)
+
+ assert.Equal(t, feature.Enabled, true)
+}
+
+func TestMapGetChildNotFound(t *testing.T) {
+ m := map[string]string{"a": "apple", "b": "banana"}
+ n, err := newNode(reflect.ValueOf(m))
+ assert.NoError(t, err)
+
+ val, err := n.GetChild("a")
+ assert.NoError(t, err)
+ str, err := val.(leafNode).GetString()
+ assert.NoError(t, err)
+ assert.Equal(t, str, "apple")
+
+ _, err = n.GetChild("c")
+ assert.Error(t, err)
+ assert.Equal(t, err.Error(), "not found")
+
+ keys, err := n.ChildrenKeys()
+ assert.NoError(t, err)
+ assert.Equal(t, keys, []string{"a", "b"})
+}
diff --git a/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c
new file mode 100644
index 0000000000000..f3c17c3dd5ca3
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c
@@ -0,0 +1,99 @@
+#include "bpf_helpers.h"
+#include "bpf_tracing.h"
+#include "kconfig.h"
+#include <asm/ptrace.h> // for struct pt_regs (assumed restoration of an include lost in extraction)
+#include "types.h"
+
+#define MAX_STRING_SIZE {{ .InstrumentationInfo.InstrumentationOptions.StringMaxSize}}
+#define PARAM_BUFFER_SIZE {{ .InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize}}
+#define STACK_DEPTH_LIMIT 10
+#define MAX_SLICE_SIZE 1800
+#define MAX_SLICE_LENGTH 20
+
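+// Ring buffer map used to send events from the BPF program up to user space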
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 24);
+} events SEC(".maps");
+
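+// Single-entry array map holding a zeroed buffer, used to wipe event fields before they're populated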
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(char[PARAM_BUFFER_SIZE]));
+ __uint(max_entries, 1);
+} zeroval SEC(".maps");
+
+struct event {
+ struct base_event base;
+ char output[PARAM_BUFFER_SIZE];
+};
+
+SEC("uprobe/{{.GetBPFFuncName}}")
+int {{.GetBPFFuncName}}(struct pt_regs *ctx)
+{
+ bpf_printk("{{.GetBPFFuncName}} probe in {{.ServiceName}} has triggered");
+
+ // reserve space on ringbuffer
+ struct event *event;
+ event = bpf_ringbuf_reserve(&events, sizeof(struct event), 0);
+ if (!event) {
+ bpf_printk("No space available on ringbuffer, dropping event");
+ return 0;
+ }
+
+ char* zero_string;
+ __u32 key = 0;
+ zero_string = bpf_map_lookup_elem(&zeroval, &key);
+ if (!zero_string) {
+ bpf_printk("couldn't lookup zero value in zeroval array map, dropping event for {{.GetBPFFuncName}}");
+ bpf_ringbuf_discard(event, 0);
+ return 0;
+ }
+
+ bpf_probe_read(&event->base.probe_id, sizeof(event->base.probe_id), zero_string);
+ bpf_probe_read(&event->base.program_counters, sizeof(event->base.program_counters), zero_string);
+ bpf_probe_read(&event->output, sizeof(event->output), zero_string);
+ bpf_probe_read(&event->base.probe_id, {{ .ID | len }}, "{{.ID}}");
+
+ // Get tid and tgid
+ u64 pidtgid = bpf_get_current_pid_tgid();
+ u32 tgid = pidtgid >> 32;
+ event->base.pid = tgid;
+
+    u64 uidgid = bpf_get_current_uid_gid();
+    u32 uid = uidgid & 0xFFFFFFFF; // helper packs gid in the upper 32 bits and uid in the lower 32
+ event->base.uid = uid;
+
+ // Collect stack trace
+ __u64 currentPC = ctx->pc;
+    bpf_probe_read(&event->base.program_counters[0], sizeof(__u64), &currentPC);
+
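+    // Walk the frame pointer chain to collect return addresses; regs[29] is the frame pointer and regs[30] the link register on arm64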
+ __u64 bp = ctx->regs[29];
+ bpf_probe_read(&bp, sizeof(__u64), (void*)bp); // dereference bp to get current stack frame
+ __u64 ret_addr = ctx->regs[30]; // when bpf prog enters, the return address hasn't yet been written to the stack
+
+ int i;
+ for (i = 1; i < STACK_DEPTH_LIMIT; i++)
+ {
+ if (bp == 0) {
+ break;
+ }
+ bpf_probe_read(&event->base.program_counters[i], sizeof(__u64), &ret_addr);
+ bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(bp-8));
+ bpf_probe_read(&bp, sizeof(__u64), (void*)bp);
+ }
+
+ // Collect parameters
+ __u8 param_type;
+ __u16 param_size;
+ __u16 slice_length;
+
+ int outputOffset = 0;
+
+ {{ .InstrumentationInfo.BPFParametersSourceCode }}
+
+ bpf_ringbuf_submit(event, 0);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/pkg/dynamicinstrumentation/codegen/c/types.h b/pkg/dynamicinstrumentation/codegen/c/types.h
new file mode 100644
index 0000000000000..f170b91fe7541
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/c/types.h
@@ -0,0 +1,14 @@
+#ifndef DI_TYPES_H
+#define DI_TYPES_H
+
+#include "ktypes.h"
+
+// NOTE: Be careful when adding fields, alignment should always be to 8 bytes
+struct base_event {
+ char probe_id[304];
+ __u32 pid;
+ __u32 uid;
+ __u64 program_counters[10];
+}__attribute__((aligned(8)));
+
+#endif
diff --git a/pkg/dynamicinstrumentation/codegen/codegen.go b/pkg/dynamicinstrumentation/codegen/codegen.go
new file mode 100644
index 0000000000000..6c3e7b44905cf
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/codegen.go
@@ -0,0 +1,232 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package codegen is used to generate bpf program source code based on probe definitions
+package codegen
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+// GenerateBPFParamsCode generates the source code associated with the probe and data
+// in its associated process info.
+func GenerateBPFParamsCode(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error {
+ parameterBytes := []byte{}
+ out := bytes.NewBuffer(parameterBytes)
+
+ if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters {
+ params := applyCaptureDepth(procInfo.TypeMap.Functions[probe.FuncName], probe.InstrumentationInfo.InstrumentationOptions.MaxReferenceDepth)
+ applyFieldCountLimit(params)
+ for i := range params {
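+			// Flatten the nested parameter tree so header and read code can be emitted sequentially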
+ flattenedParams := flattenParameters([]ditypes.Parameter{params[i]})
+
+ err := generateHeadersText(flattenedParams, out)
+ if err != nil {
+ return err
+ }
+
+ err = generateParametersText(flattenedParams, out)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ log.Info("Not capturing parameters")
+ }
+
+ probe.InstrumentationInfo.BPFParametersSourceCode = out.String()
+ return nil
+}
+
+func resolveHeaderTemplate(param *ditypes.Parameter) (*template.Template, error) {
+ switch param.Kind {
+ case uint(reflect.String):
+ if param.Location.InReg {
+ return template.New("string_reg_header_template").Parse(stringRegisterHeaderTemplateText)
+ }
+ return template.New("string_stack_header_template").Parse(stringStackHeaderTemplateText)
+ case uint(reflect.Slice):
+ if param.Location.InReg {
+ return template.New("slice_reg_header_template").Parse(sliceRegisterHeaderTemplateText)
+ }
+ return template.New("slice_stack_header_template").Parse(sliceStackHeaderTemplateText)
+ default:
+ return template.New("header_template").Parse(headerTemplateText)
+ }
+}
+
+func generateHeadersText(params []ditypes.Parameter, out io.Writer) error {
+ for i := range params {
+ err := generateHeaderText(params[i], out)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func generateHeaderText(param ditypes.Parameter, out io.Writer) error {
+ if reflect.Kind(param.Kind) == reflect.Slice {
+		return generateSliceHeader(&param, out)
+	}
+
+	tmplt, err := resolveHeaderTemplate(&param)
+ if err != nil {
+ return err
+ }
+ err = tmplt.Execute(out, param)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func generateParametersText(params []ditypes.Parameter, out io.Writer) error {
+ for i := range params {
+		err := generateParameterText(&params[i], out)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func generateParameterText(param *ditypes.Parameter, out io.Writer) error {
+
+ if param.Kind == uint(reflect.Array) ||
+ param.Kind == uint(reflect.Struct) ||
+ param.Kind == uint(reflect.Pointer) {
+ // - Arrays/structs don't have actual values, we just want to generate
+ // a header for them for the sake of event parsing.
+ // - Pointers do have actual values, but they're captured when the
+ // underlying value is also captured.
+ return nil
+ }
+
+ template, err := resolveParameterTemplate(param)
+ if err != nil {
+ return err
+ }
+ param.Type = cleanupTypeName(param.Type)
+ err = template.Execute(out, param)
+ if err != nil {
+ return fmt.Errorf("could not execute template for generating read of parameter: %w", err)
+ }
+
+ return nil
+}
+
+func resolveParameterTemplate(param *ditypes.Parameter) (*template.Template, error) {
+ if param.Type == "main.triggerVerifierErrorForTesting" {
+ return template.New("trigger_verifier_error_template").Parse(forcedVerifierErrorTemplate)
+ }
+ notSupported := param.NotCaptureReason == ditypes.Unsupported
+ cutForFieldLimit := param.NotCaptureReason == ditypes.FieldLimitReached
+
+ if notSupported {
+ return template.New("unsupported_type_template").Parse(unsupportedTypeTemplateText)
+ } else if cutForFieldLimit {
+ return template.New("cut_field_limit_template").Parse(cutForFieldLimitTemplateText)
+ }
+
+ if param.Location.InReg {
+ return resolveRegisterParameterTemplate(param)
+ }
+ return resolveStackParameterTemplate(param)
+}
+
+func resolveRegisterParameterTemplate(param *ditypes.Parameter) (*template.Template, error) {
+ needsDereference := param.Location.NeedsDereference
+ stringType := param.Kind == uint(reflect.String)
+ sliceType := param.Kind == uint(reflect.Slice)
+
+ if needsDereference {
+ // Register Pointer
+ return template.New("pointer_register_template").Parse(pointerRegisterTemplateText)
+ } else if stringType {
+ // Register String
+ return template.New("string_register_template").Parse(stringRegisterTemplateText)
+ } else if sliceType {
+ // Register Slice
+ return template.New("slice_register_template").Parse(sliceRegisterTemplateText)
+ } else if !needsDereference {
+ // Register Normal Value
+ return template.New("register_template").Parse(normalValueRegisterTemplateText)
+ }
+ return nil, errors.New("no template created: invalid or unsupported type")
+}
+
+func resolveStackParameterTemplate(param *ditypes.Parameter) (*template.Template, error) {
+ needsDereference := param.Location.NeedsDereference
+ stringType := param.Kind == uint(reflect.String)
+ sliceType := param.Kind == uint(reflect.Slice)
+
+ if needsDereference {
+ // Stack Pointer
+ return template.New("pointer_stack_template").Parse(pointerStackTemplateText)
+ } else if stringType {
+ // Stack String
+ return template.New("string_stack_template").Parse(stringStackTemplateText)
+ } else if sliceType {
+ // Stack Slice
+ return template.New("slice_stack_template").Parse(sliceStackTemplateText)
+ } else if !needsDereference {
+ // Stack Normal Value
+ return template.New("stack_template").Parse(normalValueStackTemplateText)
+ }
+ return nil, errors.New("no template created: invalid or unsupported type")
+}
+
+func cleanupTypeName(s string) string {
+ return strings.TrimPrefix(s, "*")
+}
+
+func generateSliceHeader(slice *ditypes.Parameter, out io.Writer) error {
+ if slice == nil {
+ return errors.New("nil slice parameter when generating header code")
+ }
+ if len(slice.ParameterPieces) != 1 {
+ return errors.New("invalid slice parameter when generating header code")
+ }
+
+ x := []byte{}
+ buf := bytes.NewBuffer(x)
+ err := generateHeaderText(slice.ParameterPieces[0], buf)
+ if err != nil {
+ return err
+ }
+ w := sliceHeaderWrapper{
+ Parameter: slice,
+ SliceTypeHeaderText: buf.String(),
+ }
+
+ sliceTemplate, err := resolveHeaderTemplate(slice)
+ if err != nil {
+ return err
+ }
+
+ err = sliceTemplate.Execute(out, w)
+ if err != nil {
+ return fmt.Errorf("could not execute template for generating slice header: %w", err)
+ }
+ return nil
+}
+
+type sliceHeaderWrapper struct {
+ Parameter *ditypes.Parameter
+ SliceTypeHeaderText string
+}
diff --git a/pkg/dynamicinstrumentation/codegen/compile.go b/pkg/dynamicinstrumentation/codegen/compile.go
new file mode 100644
index 0000000000000..abb8523280443
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/compile.go
@@ -0,0 +1,11 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package codegen
+
+//go:generate $GOPATH/bin/include_headers pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/c
+//go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/bytecode/runtime/dynamicinstrumentation.go runtime
diff --git a/pkg/dynamicinstrumentation/codegen/output_offsets.go b/pkg/dynamicinstrumentation/codegen/output_offsets.go
new file mode 100644
index 0000000000000..56250b25897c2
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/output_offsets.go
@@ -0,0 +1,138 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package codegen
+
+import (
+ "math/rand"
+ "reflect"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+type paramDepthCounter struct {
+ depth int
+ param *ditypes.Parameter
+}
+
+func applyCaptureDepth(params []ditypes.Parameter, maxDepth int) []ditypes.Parameter {
+ log.Tracef("Applying capture depth: %d", maxDepth)
+ queue := []paramDepthCounter{}
+
+ for i := range params {
+ queue = append(queue, paramDepthCounter{
+ depth: 0,
+			param: &params[i],
+ })
+ }
+
+ for len(queue) != 0 {
+ front := queue[0]
+ queue = queue[1:]
+
+ if front.depth == maxDepth {
+ // max capture depth reached, remove parameters below this level.
+ front.param.ParameterPieces = []ditypes.Parameter{}
+ if front.param.Kind == uint(reflect.Struct) {
+ // struct size reflects the number of fields,
+ // setting to 0 tells the user space parsing not to
+ // expect anything else.
+ front.param.TotalSize = 0
+ }
+ } else {
+ for i := range front.param.ParameterPieces {
+ queue = append(queue, paramDepthCounter{
+ depth: front.depth + 1,
+ param: &front.param.ParameterPieces[i],
+ })
+ }
+ }
+ }
+ return params
+}
+
+func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter {
+ flattenedParams := []ditypes.Parameter{}
+ for i := range params {
+ kind := reflect.Kind(params[i].Kind)
+ if kind == reflect.Slice {
+ // Slices don't get flattened as we need the underlying type.
+ // We populate the slice's template using that type.
+ flattenedParams = append(flattenedParams, params[i])
+ } else if hasHeader(kind) {
+ paramHeader := params[i]
+ paramHeader.ParameterPieces = nil
+ flattenedParams = append(flattenedParams, paramHeader)
+ flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...)
+ } else if len(params[i].ParameterPieces) > 0 {
+ flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...)
+ } else {
+ flattenedParams = append(flattenedParams, params[i])
+ }
+ }
+
+ for i := range flattenedParams {
+ flattenedParams[i].ID = randomID()
+ }
+
+ return flattenedParams
+}
+
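+// applyFieldCountLimit walks the parameter tree breadth-first and marks any fields
+// beyond ditypes.MaxFieldCount as cut so they are not captured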
+func applyFieldCountLimit(params []ditypes.Parameter) {
+ queue := []*ditypes.Parameter{}
+ for i := range params {
+		queue = append(queue, &params[len(params)-1-i])
+ }
+ var (
+ current *ditypes.Parameter
+ max int
+ )
+ for len(queue) != 0 {
+ current = queue[0]
+ queue = queue[1:]
+
+ max = len(current.ParameterPieces)
+ if len(current.ParameterPieces) > ditypes.MaxFieldCount {
+ max = ditypes.MaxFieldCount
+ for j := max; j < len(current.ParameterPieces); j++ {
+				excludeForFieldCount(&current.ParameterPieces[j])
+			}
+		}
+		for n := 0; n < max; n++ {
+			queue = append(queue, &current.ParameterPieces[n])
+ }
+ }
+}
+
+func excludeForFieldCount(root *ditypes.Parameter) {
+ // Exclude all in this tree
+ if root == nil {
+ return
+ }
+ root.NotCaptureReason = ditypes.FieldLimitReached
+ root.Kind = ditypes.KindCutFieldLimit
+ for i := range root.ParameterPieces {
+ excludeForFieldCount(&root.ParameterPieces[i])
+ }
+}
+
+func hasHeader(kind reflect.Kind) bool {
+ return kind == reflect.Struct ||
+ kind == reflect.Array ||
+ kind == reflect.Pointer
+}
+
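+// randomID returns a short random string of uppercase letters, used to give
+// generated template variables unique names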
+func randomID() string {
+ length := 6
+ randomString := make([]byte, length)
+ for i := 0; i < length; i++ {
+		randomString[i] = byte('A' + rand.Intn(26)) // random uppercase letter A-Z
+ }
+ return string(randomString)
+}
diff --git a/pkg/dynamicinstrumentation/codegen/templates.go b/pkg/dynamicinstrumentation/codegen/templates.go
new file mode 100644
index 0000000000000..64f5dab18ec3a
--- /dev/null
+++ b/pkg/dynamicinstrumentation/codegen/templates.go
@@ -0,0 +1,210 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package codegen
+
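+// forcedVerifierErrorTemplate emits an intentionally invalid dereference so that
+// loading the program fails; used for testing the error-handling path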
+var forcedVerifierErrorTemplate = `
+int illegalDereference = *(*(*ctx->regs[0]));
+`
+
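+// headerTemplateText writes a three-byte header ahead of each captured value:
+// one byte for the kind and two bytes for the size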
+var headerTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Write the kind and size to output buffer
+param_type = {{.Kind}};
+bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type);
+param_size = {{.TotalSize}};
+bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size);
+outputOffset += 3;
+`
+
+// The lengths of slices aren't known until parsing, so they require
+// special headers that read the length dynamically
+var sliceRegisterHeaderTemplateText = `
+// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}}
+// Write the slice kind to output buffer
+param_type = {{.Parameter.Kind}};
+bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type);
+// Read slice length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Parameter.Location.Register}}+1]);
+bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size);
+outputOffset += 3;
+
+__u16 indexSlice{{.Parameter.ID}};
+slice_length = param_size;
+if (slice_length > MAX_SLICE_LENGTH) {
+ slice_length = MAX_SLICE_LENGTH;
+}
+
+for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) {
+ if (indexSlice{{.Parameter.ID}} >= slice_length) {
+ break;
+ }
+ {{.SliceTypeHeaderText}}
+}
+`
+
+// The lengths of slices aren't known until parsing, so they require
+// special headers that read the length dynamically
+var sliceStackHeaderTemplateText = `
+// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}}
+// Write the slice kind to output buffer
+param_type = {{.Parameter.Kind}};
+bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type);
+// Read slice length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Parameter.Location.StackOffset}}+8));
+bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size);
+outputOffset += 3;
+
+__u16 indexSlice{{.Parameter.ID}};
+slice_length = param_size;
+if (slice_length > MAX_SLICE_LENGTH) {
+ slice_length = MAX_SLICE_LENGTH;
+}
+
+for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) {
+ if (indexSlice{{.Parameter.ID}} >= slice_length) {
+ break;
+ }
+ {{.SliceTypeHeaderText}}
+}
+`
+
+// The lengths of strings aren't known until parsing, so they require
+// special headers that read the length dynamically
+var stringRegisterHeaderTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Write the string kind to output buffer
+param_type = {{.Kind}};
+bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type);
+
+// Read string length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]);
+
+// Limit string length
+__u16 string_size_{{.ID}} = param_size;
+if (string_size_{{.ID}} > MAX_STRING_SIZE) {
+ string_size_{{.ID}} = MAX_STRING_SIZE;
+}
+bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}});
+outputOffset += 3;
+`
+
+// The lengths of strings aren't known until parsing, so they require
+// special headers that read the length dynamically
+var stringStackHeaderTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Write the string kind to output buffer
+param_type = {{.Kind}};
+bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type);
+// Read string length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8));
+// Limit string length
+__u16 string_size_{{.ID}} = param_size;
+if (string_size_{{.ID}} > MAX_STRING_SIZE) {
+ string_size_{{.ID}} = MAX_STRING_SIZE;
+}
+bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}});
+outputOffset += 3;
+`
+
+var sliceRegisterTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read contents of slice
+bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)ctx->regs[{{.Location.Register}}]);
+outputOffset += MAX_SLICE_SIZE;
+`
+
+var sliceStackTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read contents of slice
+bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)(ctx->regs[29]+{{.Location.StackOffset}}));
+outputOffset += MAX_SLICE_SIZE;
+`
+
+var stringRegisterTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read string length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]);
+
+__u16 string_size_read_{{.ID}} = param_size;
+if (string_size_read_{{.ID}} > MAX_STRING_SIZE) {
+ string_size_read_{{.ID}} = MAX_STRING_SIZE;
+}
+
+// Read contents of string
+bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)ctx->regs[{{.Location.Register}}]);
+outputOffset += string_size_read_{{.ID}};
+`
+
+var stringStackTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read string length and write it to output buffer
+bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8));
+// Limit string length
+__u16 string_size_read_{{.ID}} = param_size;
+if (string_size_read_{{.ID}} > MAX_STRING_SIZE) {
+ string_size_read_{{.ID}} = MAX_STRING_SIZE;
+}
+// Read contents of string
+bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(ctx->regs[29]+{{.Location.StackOffset}}));
+bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)(ret_addr));
+outputOffset += string_size_read_{{.ID}};
+`
+
+var pointerRegisterTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read the pointer value (address of underlying value)
+void *ptrTo{{.ID}};
+bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), &ctx->regs[{{.Location.Register}}]);
+
+// Write the underlying value to output
+bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}});
+outputOffset += {{.TotalSize}};
+
+// Write the pointer address to output
+ptrTo{{.ID}} += {{.Location.PointerOffset}};
+bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}});
+`
+
+var pointerStackTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read the pointer value (address of underlying value)
+void *ptrTo{{.ID}};
+bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8));
+
+// Write the underlying value to output
+bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}});
+outputOffset += {{.TotalSize}};
+
+// Write the pointer address to output
+ptrTo{{.ID}} += {{.Location.PointerOffset}};
+bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}});
+`
+
+var normalValueRegisterTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, &ctx->regs[{{.Location.Register}}]);
+outputOffset += {{.TotalSize}};
+`
+
+var normalValueStackTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// Read value for {{.Name}}
+bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, (char*)((ctx->regs[29])+{{.Location.StackOffset}}));
+outputOffset += {{.TotalSize}};
+`
+
+// Unsupported types just get a single `255` value as a placeholder to signify
+// that an unsupported type goes here. The size field is where we keep the actual type.
+var unsupportedTypeTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// No capture, unsupported type
+`
+
+var cutForFieldLimitTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
+// No capture, cut for field limit
+`
diff --git a/pkg/dynamicinstrumentation/di.go b/pkg/dynamicinstrumentation/di.go
new file mode 100644
index 0000000000000..409b8a1af5f7a
--- /dev/null
+++ b/pkg/dynamicinstrumentation/di.go
@@ -0,0 +1,159 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package dynamicinstrumentation provides the main entry point into running the
+// Dynamic Instrumentation for Go product
+package dynamicinstrumentation
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diconfig"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/uploader"
+)
+
+// GoDI is the central controller representation of the Dynamic Instrumentation
+// implementation for Go services
+type GoDI struct {
+ cm diconfig.ConfigManager
+
+ lu uploader.LogUploader
+ du uploader.DiagnosticUploader
+
+ processEvent ditypes.EventCallback
+ Close func()
+
+ stats GoDIStats
+}
+
+// GoDIStats is used to track various metrics relevant to the health of the
+// Dynamic Instrumentation process
+type GoDIStats struct {
+ PIDEventsCreatedCount map[uint32]uint64 // pid : count
+ ProbeEventsCreatedCount map[string]uint64 // probeID : count
+}
+
+func newGoDIStats() GoDIStats {
+ return GoDIStats{
+ PIDEventsCreatedCount: make(map[uint32]uint64),
+ ProbeEventsCreatedCount: make(map[string]uint64),
+ }
+}
+
+// DIOptions is used to configure the running Dynamic Instrumentation process
+type DIOptions struct {
+ Offline bool
+
+ ProbesFilePath string
+ SnapshotOutput string
+ DiagnosticOutput string
+
+ ditypes.EventCallback
+}
+
+// RunDynamicInstrumentation is the main entry point into running the Dynamic
+// Instrumentation project for Go.
+func RunDynamicInstrumentation(opts *DIOptions) (*GoDI, error) {
+ var goDI *GoDI
+
+ err := ebpf.SetupEventsMap()
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.Offline {
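+		// Offline mode reads probe definitions from a file and writes snapshots and
+		// diagnostics to local files instead of uploading them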
+ cm, err := diconfig.NewFileConfigManager(opts.ProbesFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create new file config manager: %w", err)
+ }
+ lu, err := uploader.NewOfflineLogSerializer(opts.SnapshotOutput)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create new offline log serializer: %w", err)
+ }
+ du, err := uploader.NewOfflineDiagnosticSerializer(diagnostics.Diagnostics, opts.DiagnosticOutput)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create new offline diagnostic serializer: %w", err)
+ }
+ goDI = &GoDI{
+ cm: cm,
+ lu: lu,
+ du: du,
+ stats: newGoDIStats(),
+ }
+ } else {
+ cm, err := diconfig.NewRCConfigManager()
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create new RC config manager: %w", err)
+ }
+ goDI = &GoDI{
+ cm: cm,
+ lu: uploader.NewLogUploader(),
+ du: uploader.NewDiagnosticUploader(),
+ stats: newGoDIStats(),
+ }
+ }
+ if opts.EventCallback != nil {
+ goDI.processEvent = opts.EventCallback
+ } else {
+ goDI.processEvent = goDI.uploadSnapshot
+ }
+
+ closeRingbuffer, err := goDI.startRingbufferConsumer()
+ if err != nil {
+ return nil, fmt.Errorf("couldn't set up new ringbuffer consumer: %w", err)
+ }
+
+ goDI.Close = func() {
+ goDI.cm.Stop()
+ closeRingbuffer()
+ }
+
+ return goDI, nil
+}
+
+func (goDI *GoDI) printSnapshot(event *ditypes.DIEvent) {
+ if event == nil {
+ return
+ }
+ procInfo := goDI.cm.GetProcInfos()[event.PID]
+ diLog := uploader.NewDILog(procInfo, event)
+
+ var bs []byte
+ var err error
+
+ if diLog != nil {
+ bs, err = json.MarshalIndent(diLog, "", " ")
+ } else {
+ bs, err = json.MarshalIndent(event, "", " ")
+ }
+
+ if err != nil {
+ log.Info(err)
+ }
+ log.Debug(string(bs))
+}
+
+func (goDI *GoDI) uploadSnapshot(event *ditypes.DIEvent) {
+ goDI.printSnapshot(event)
+ procInfo := goDI.cm.GetProcInfos()[event.PID]
+ diLog := uploader.NewDILog(procInfo, event)
+ if diLog != nil {
+ goDI.lu.Enqueue(diLog)
+ }
+}
+
+// GetStats returns maps of various statistics about the
+// runtime health of dynamic instrumentation
+func (goDI *GoDI) GetStats() GoDIStats {
+ return goDI.stats
+}
diff --git a/pkg/dynamicinstrumentation/diagnostics/diagnostics.go b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go
new file mode 100644
index 0000000000000..c1e351297210d
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package diagnostics provides a facility for dynamic instrumentation to upload diagnostic information
+package diagnostics
+
+import (
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+func newDIDiagnostic(service, runtimeID, probeID string, status ditypes.Status) *ditypes.DiagnosticUpload {
+ return &ditypes.DiagnosticUpload{
+ Service: service,
+ DDSource: "dd_debugger",
+ Debugger: struct {
+ ditypes.Diagnostic `json:"diagnostics"`
+ }{
+ Diagnostic: ditypes.Diagnostic{
+ RuntimeID: runtimeID,
+ ProbeID: probeID,
+ Status: status,
+ },
+ },
+ }
+}
+
+type probeInstanceID struct {
+ service string
+ runtimeID string
+ probeID string
+}
+
+// DiagnosticManager is used to keep track and upload diagnostic information
+type DiagnosticManager struct {
+ state map[probeInstanceID]*ditypes.DiagnosticUpload
+ Updates chan *ditypes.DiagnosticUpload
+
+ mu sync.Mutex
+}
+
+// NewDiagnosticManager creates a new DiagnosticManager
+func NewDiagnosticManager() *DiagnosticManager {
+ return &DiagnosticManager{
+ state: make(map[probeInstanceID]*ditypes.DiagnosticUpload),
+ Updates: make(chan *ditypes.DiagnosticUpload),
+ }
+}
+
+// SetStatus associates the status with the specified service/probe
+func (m *DiagnosticManager) SetStatus(service, runtimeID, probeID string, status ditypes.Status) {
+	id := probeInstanceID{service, runtimeID, probeID}
+ d := newDIDiagnostic(service, runtimeID, probeID, status)
+ m.update(id, d)
+}
+
+// SetError associates the error with the specified service/probe
+func (m *DiagnosticManager) SetError(service, runtimeID, probeID, errorType, errorMessage string) {
+	id := probeInstanceID{service, runtimeID, probeID}
+ d := newDIDiagnostic(service, runtimeID, probeID, ditypes.StatusError)
+ d.SetError(errorType, errorMessage)
+ m.update(id, d)
+}
+
+func (m *DiagnosticManager) update(id probeInstanceID, d *ditypes.DiagnosticUpload) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if m.state[id] != d {
+ m.state[id] = d
+ // TODO: if there is no consumer reading updates, this blocks the calling goroutine
+ m.Updates <- d
+ }
+}
+
+// Diagnostics is a global instance of a diagnostic manager
+var Diagnostics = NewDiagnosticManager()
diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go
new file mode 100644
index 0000000000000..4722aa6505202
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go
@@ -0,0 +1,268 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package diconfig
+
+import (
+ "debug/elf"
+ "fmt"
+ "reflect"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
+)
+
+// inspectGoBinaries goes through each service and populates information about the binary,
+// the relevant parameters, and their types.
+// configEvent maps service names to info about the service and their configurations
+func inspectGoBinaries(configEvent ditypes.DIProcs) error {
+ var err error
+ for i := range configEvent {
+ err = AnalyzeBinary(configEvent[i])
+ if err != nil {
+ return fmt.Errorf("inspection of PID %d (path=%s) failed: %w", configEvent[i].PID, configEvent[i].BinaryPath, err)
+ }
+ }
+ return nil
+}
+
+// AnalyzeBinary reads the binary associated with the specified process and parses
+// the DWARF information. It populates relevant fields in the process representation
+func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error {
+ functions := []string{}
+ targetFunctions := map[string]bool{}
+ for _, probe := range procInfo.GetProbes() {
+ functions = append(functions, probe.FuncName)
+ targetFunctions[probe.FuncName] = true
+ }
+
+ dwarfData, err := loadDWARF(procInfo.BinaryPath)
+ if err != nil {
+ return fmt.Errorf("could not retrieve debug information from binary: %w", err)
+ }
+
+ typeMap, err := getTypeMap(dwarfData, targetFunctions)
+ if err != nil {
+		return fmt.Errorf("could not retrieve type information from binary: %w", err)
+ }
+
+ procInfo.TypeMap = typeMap
+
+ elfFile, err := elf.Open(procInfo.BinaryPath)
+ if err != nil {
+		return fmt.Errorf("could not open elf file: %w", err)
+ }
+
+ procInfo.DwarfData = dwarfData
+
+ fieldIDs := make([]bininspect.FieldIdentifier, 0)
+ for _, funcParams := range typeMap.Functions {
+ for _, param := range funcParams {
+ fieldIDs = append(fieldIDs,
+ collectFieldIDs(param)...)
+ }
+ }
+
+ r, err := bininspect.InspectWithDWARF(elfFile, functions, fieldIDs)
+ if err != nil {
+		return fmt.Errorf("could not determine locations of variables from debug information: %w", err)
+ }
+
+ // Use the result from InspectWithDWARF to populate the locations of parameters
+ for functionName, functionMetadata := range r.Functions {
+ putLocationsInParams(functionMetadata.Parameters, r.StructOffsets, procInfo.TypeMap.Functions, functionName)
+ correctStructSizes(procInfo.TypeMap.Functions[functionName])
+ }
+
+ return nil
+}
+
+// collectFieldIDs returns identifiers for any struct fields found among the parameter's
+// types, including structs nested deep within complex types
+func collectFieldIDs(param ditypes.Parameter) []bininspect.FieldIdentifier {
+ fieldIDs := []bininspect.FieldIdentifier{}
+ stack := append([]ditypes.Parameter{param}, param.ParameterPieces...)
+
+ for len(stack) != 0 {
+
+ current := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if !kindIsSupported(reflect.Kind(current.Kind)) {
+ continue
+ }
+ if len(current.ParameterPieces) != 0 {
+ stack = append(stack, current.ParameterPieces...)
+ }
+
+ if current.Kind == uint(reflect.Struct) || current.Kind == uint(reflect.Slice) {
+ for _, structField := range current.ParameterPieces {
+ if structField.Name == "" || current.Type == "" {
+ // these can be blank in anonymous types or embedded fields
+ // of builtin types. bininspect has no ability to find offsets
+ // in these cases and we're best off skipping them.
+ continue
+ }
+ fieldIDs = append(fieldIDs, bininspect.FieldIdentifier{
+ StructName: current.Type,
+ FieldName: structField.Name,
+ })
+ if len(fieldIDs) >= ditypes.MaxFieldCount {
+					log.Infof("field limit reached (%d of max %d), not collecting further fields", len(fieldIDs), ditypes.MaxFieldCount)
+ return fieldIDs
+ }
+ }
+ }
+ }
+ return fieldIDs
+}
+
+func putLocationsInParams(
+ paramMetadatas []bininspect.ParameterMetadata,
+ fieldLocations map[bininspect.FieldIdentifier]uint64,
+ funcMap map[string][]ditypes.Parameter,
+ funcName string) {
+
+ params := funcMap[funcName]
+ locations := []ditypes.Location{}
+
+ // Collect locations in order
+ for _, param := range paramMetadatas {
+ for _, piece := range param.Pieces {
+ locations = append(locations, ditypes.Location{
+ InReg: piece.InReg,
+ StackOffset: piece.StackOffset,
+ Register: piece.Register,
+ })
+ }
+ }
+
+ assignLocationsInOrder(params, locations)
+ correctTypeSpecificLocations(params, fieldLocations)
+
+ funcMap[funcName] = params
+}
+
+func assignLocationsInOrder(params []ditypes.Parameter, locations []ditypes.Location) {
+ stack := []*ditypes.Parameter{}
+ locationCounter := 0
+
+ // Start by pushing addresses of all parameters to stack
+ for i := range params {
+		stack = append(stack, &params[len(params)-1-i])
+ }
+
+ for {
+ if len(stack) == 0 || locationCounter == len(locations) {
+ return
+ }
+ current := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if len(current.ParameterPieces) != 0 &&
+ current.Kind != uint(reflect.Array) &&
+ current.Kind != uint(reflect.Pointer) &&
+ current.Kind != uint(reflect.Slice) {
+
+ for i := range current.ParameterPieces {
+				stack = append(stack, &current.ParameterPieces[len(current.ParameterPieces)-1-i])
+ }
+ } else {
+ // Location fields are directly assigned instead of setting the whole
+ // location field to preserve other fields
+ locationToAssign := locations[locationCounter]
+ current.Location.InReg = locationToAssign.InReg
+ current.Location.Register = locationToAssign.Register
+ current.Location.StackOffset = locationToAssign.StackOffset
+
+ if reflect.Kind(current.Kind) == reflect.String {
+ // Strings actually have two locations (pointer, length)
+ // but are shortened to a single one for parsing. The missing
+ // location is taken into account in bpf code, but we need
+ // to make sure it's not assigned to something else here.
+ locationCounter++
+ } else if reflect.Kind(current.Kind) == reflect.Slice {
+ // slices actually have three locations (array, length, capacity)
+ // but are shortened to a single one for parsing. The missing
+ // locations are taken into account in bpf code, but we need
+ // to make sure it's not assigned to something else here.
+ locationCounter += 2
+ }
+ locationCounter++
+ }
+ }
+}
+
+func correctTypeSpecificLocations(params []ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) {
+ for i := range params {
+ if params[i].Kind == uint(reflect.Array) {
+			correctArrayLocations(&params[i], fieldLocations)
+		} else if params[i].Kind == uint(reflect.Pointer) {
+			correctPointerLocations(&params[i], fieldLocations)
+		} else if params[i].Kind == uint(reflect.Struct) {
+			correctStructLocations(&params[i], fieldLocations)
+ }
+ }
+}
+
+// correctStructLocations sets pointer and stack offsets for struct fields from
+// bininspect results
+func correctStructLocations(structParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) {
+ for i := range structParam.ParameterPieces {
+ fieldID := bininspect.FieldIdentifier{
+ StructName: structParam.Type,
+ FieldName: structParam.ParameterPieces[i].Name,
+ }
+ offset, ok := fieldLocations[fieldID]
+ if !ok {
+ log.Infof("no field location available for %s.%s\n", fieldID.StructName, fieldID.FieldName)
+ continue
+ }
+
+		fieldLocationUnset := isLocationUnset(structParam.ParameterPieces[i].Location)
+		if fieldLocationUnset {
+			// A field's location is only set when it was directly assigned (i.e. it has its own
+			// register instead of needing to dereference a pointer or get the element from a slice);
+			// otherwise derive it from the parent struct's location
+			structParam.ParameterPieces[i].Location = structParam.Location
+			structParam.ParameterPieces[i].Location.StackOffset = int64(offset) + structParam.Location.StackOffset
+		}
+
+ structParam.ParameterPieces[i].Location.PointerOffset = offset
+ structParam.ParameterPieces[i].Location.StackOffset = structParam.ParameterPieces[0].Location.StackOffset + int64(offset)
+
+ correctTypeSpecificLocations([]ditypes.Parameter{structParam.ParameterPieces[i]}, fieldLocations)
+ }
+}
+
+// isLocationUnset reports whether the location has not been assigned yet
+func isLocationUnset(l ditypes.Location) bool {
+	return reflect.DeepEqual(l, ditypes.Location{})
+}
+
+// correctPointerLocations takes a parameter's location and copies it to the
+// underlying type that's pointed to, setting `NeedsDereference` to true.
+// It then recurses on the pointed-to value to ensure its location
+// is corrected as well
+func correctPointerLocations(pointerParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) {
+	// Pointers should have exactly one entry in ParameterPieces that corresponds to the underlying type
+ if len(pointerParam.ParameterPieces) != 1 {
+ return
+ }
+ pointerParam.ParameterPieces[0].Location = pointerParam.Location
+ pointerParam.ParameterPieces[0].Location.NeedsDereference = true
+ correctTypeSpecificLocations([]ditypes.Parameter{pointerParam.ParameterPieces[0]}, fieldLocations)
+}
+
+// correctArrayLocations takes a parameter's location and distributes it to each element
+// using `stack offset + (size*index)`, then recurses on each element of the array to
+// ensure all elements have corrected locations
+func correctArrayLocations(arrayParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) {
+ initialOffset := arrayParam.Location.StackOffset
+ for i := range arrayParam.ParameterPieces {
+ arrayParam.ParameterPieces[i].Location.StackOffset = initialOffset + (arrayParam.ParameterPieces[i].TotalSize * int64(i))
+ correctTypeSpecificLocations([]ditypes.Parameter{arrayParam.ParameterPieces[i]}, fieldLocations)
+ }
+}
diff --git a/pkg/dynamicinstrumentation/diconfig/config_manager.go b/pkg/dynamicinstrumentation/diconfig/config_manager.go
new file mode 100644
index 0000000000000..a49287e0a7566
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diconfig/config_manager.go
@@ -0,0 +1,295 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package diconfig provides utilities that allow dynamic instrumentation to receive and
+// manage probe configurations from users
+package diconfig
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/codegen"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
+ "github.com/cilium/ebpf/ringbuf"
+ "github.com/google/uuid"
+)
+
+type rcConfig struct {
+ ID string
+ Version int
+ ProbeType string `json:"type"`
+ Language string
+ Where struct {
+ TypeName string `json:"typeName"`
+ MethodName string `json:"methodName"`
+ SourceFile string
+ Lines []string
+ }
+ Tags []string
+ Template string
+ CaptureSnapshot bool
+ EvaluatedAt string
+ Capture struct {
+ MaxReferenceDepth int `json:"maxReferenceDepth"`
+ MaxFieldCount int `json:"maxFieldCount"`
+ }
+}
+
+type configUpdateCallback func(*ditypes.ProcessInfo, *ditypes.Probe)
+
+// ConfigManager is a facility to track probe configurations for
+// instrumenting tracked processes
+type ConfigManager interface {
+ GetProcInfos() ditypes.DIProcs
+ Stop()
+}
+
+// RCConfigManager is the configuration manager which utilizes remote-config
+type RCConfigManager struct {
+ procTracker *proctracker.ProcessTracker
+
+ diProcs ditypes.DIProcs
+ callback configUpdateCallback
+}
+
+// NewRCConfigManager creates a new configuration manager which utilizes remote-config
+func NewRCConfigManager() (*RCConfigManager, error) {
+ log.Info("Creating new RC config manager")
+ cm := &RCConfigManager{
+ callback: applyConfigUpdate,
+ }
+
+ cm.procTracker = proctracker.NewProcessTracker(cm.updateProcesses)
+ err := cm.procTracker.Start()
+ if err != nil {
+ return nil, fmt.Errorf("could not start process tracker: %w", err)
+ }
+ cm.diProcs = ditypes.NewDIProcs()
+ return cm, nil
+}
+
+// GetProcInfos returns the state of the RCConfigManager
+func (cm *RCConfigManager) GetProcInfos() ditypes.DIProcs {
+ return cm.diProcs
+}
+
+// Stop closes the config and proc trackers used by the RCConfigManager
+func (cm *RCConfigManager) Stop() {
+ cm.procTracker.Stop()
+ for _, procInfo := range cm.GetProcInfos() {
+ procInfo.CloseAllUprobeLinks()
+ }
+}
+
+// updateProcesses is the callback interface that ConfigManager uses to consume the map of `ProcessInfo`s
+// It is called whenever there's an update to the state of known processes of services on the machine.
+//
+// It compares the previously known state of services on the machine and creates a hook on the remote-config
+// callback for configurations on new ones, and deletes the hook on old ones.
+func (cm *RCConfigManager) updateProcesses(runningProcs ditypes.DIProcs) {
+ // Remove processes that are no longer running from state and close their uprobe links
+ for pid, procInfo := range cm.diProcs {
+ _, ok := runningProcs[pid]
+ if !ok {
+ procInfo.CloseAllUprobeLinks()
+ delete(cm.diProcs, pid)
+ }
+ }
+
+ for pid, runningProcInfo := range runningProcs {
+ _, ok := cm.diProcs[pid]
+ if !ok {
+ cm.diProcs[pid] = runningProcInfo
+ err := cm.installConfigProbe(runningProcInfo)
+ if err != nil {
+ log.Infof("could not install config probe for service %s (pid %d): %s", runningProcInfo.ServiceName, runningProcInfo.PID, err)
+ }
+ }
+ }
+}
+
+func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) error {
+ var err error
+ configProbe := newConfigProbe()
+
+ svcConfigProbe := *configProbe
+ svcConfigProbe.ServiceName = procInfo.ServiceName
+ procInfo.ProbesByID[configProbe.ID] = &svcConfigProbe
+
+ err = AnalyzeBinary(procInfo)
+ if err != nil {
+ return fmt.Errorf("could not analyze binary for config probe: %w", err)
+ }
+
+ err = codegen.GenerateBPFParamsCode(procInfo, configProbe)
+ if err != nil {
+ return fmt.Errorf("could not generate bpf code for config probe: %w", err)
+ }
+
+ err = ebpf.CompileBPFProgram(procInfo, configProbe)
+ if err != nil {
+ return fmt.Errorf("could not compile bpf code for config probe: %w", err)
+ }
+
+ err = ebpf.AttachBPFUprobe(procInfo, configProbe)
+ if err != nil {
+ return fmt.Errorf("could not attach bpf code for config probe: %w", err)
+ }
+
+ m, err := procInfo.SetupConfigUprobe()
+ if err != nil {
+ return fmt.Errorf("could not setup config probe for service %s: %w", procInfo.ServiceName, err)
+ }
+
+ r, err := ringbuf.NewReader(m)
+ if err != nil {
+		return fmt.Errorf("could not create ringbuffer reader for config probe %s: %w", procInfo.ServiceName, err)
+ }
+
+ go cm.readConfigs(r, procInfo)
+
+ return nil
+}
+
+func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.ProcessInfo) {
+ log.Tracef("Waiting for configs for service: %s", procInfo.ServiceName)
+ for {
+ record, err := r.Read()
+ if err != nil {
+ log.Errorf("error reading raw configuration from bpf: %v", err)
+ continue
+ }
+
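+		// The config probe passes three string arguments: the service's runtime ID,
+		// the config path, and the probe configuration JSON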
+ configEventParams, err := eventparser.ParseParams(record.RawSample)
+ if err != nil {
+ log.Errorf("error parsing configuration for PID %d: %v", procInfo.PID, err)
+ continue
+ }
+ if len(configEventParams) != 3 {
+ log.Errorf("error parsing configuration for PID %d: not enough arguments", procInfo.PID)
+ continue
+ }
+
+ runtimeID, err := uuid.ParseBytes([]byte(configEventParams[0].ValueStr))
+ if err != nil {
+			log.Errorf("runtime ID %q is not a UUID: %v", configEventParams[0].ValueStr, err)
+ continue
+ }
+
+		configPath, err := ditypes.ParseConfigPath(configEventParams[1].ValueStr)
+ if err != nil {
+ log.Errorf("couldn't parse config path: %v", err)
+ continue
+ }
+
+ // An empty config means that this probe has been removed for this process
+ if configEventParams[2].ValueStr == "" {
+ cm.diProcs.DeleteProbe(procInfo.PID, configPath.ProbeUUID.String())
+ continue
+ }
+
+ conf := rcConfig{}
+ err = json.Unmarshal([]byte(configEventParams[2].ValueStr), &conf)
+ if err != nil {
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, configPath.ProbeUUID.String(), "ATTACH_ERROR", err.Error())
+ log.Errorf("could not unmarshal configuration, cannot apply: %v (Probe-ID: %s)\n", err, configPath.ProbeUUID)
+ continue
+ }
+
+ if conf.Capture.MaxReferenceDepth == 0 {
+ conf.Capture.MaxReferenceDepth = int(ditypes.MaxReferenceDepth)
+ }
+ if conf.Capture.MaxFieldCount == 0 {
+ conf.Capture.MaxFieldCount = int(ditypes.MaxFieldCount)
+ }
+ opts := &ditypes.InstrumentationOptions{
+ CaptureParameters: ditypes.CaptureParameters,
+ ArgumentsMaxSize: ditypes.ArgumentsMaxSize,
+ StringMaxSize: ditypes.StringMaxSize,
+ MaxReferenceDepth: conf.Capture.MaxReferenceDepth,
+ MaxFieldCount: conf.Capture.MaxFieldCount,
+ }
+
+ probe, probeExists := procInfo.ProbesByID[configPath.ProbeUUID.String()]
+ if !probeExists {
+ cm.diProcs.SetProbe(procInfo.PID, procInfo.ServiceName, conf.Where.TypeName, conf.Where.MethodName, configPath.ProbeUUID, runtimeID, opts)
+ diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, runtimeID.String(), configPath.ProbeUUID.String(), ditypes.StatusReceived)
+ probe = procInfo.ProbesByID[configPath.ProbeUUID.String()]
+ }
+
+ // Check hash to see if the configuration changed
+ if configPath.Hash != probe.InstrumentationInfo.ConfigurationHash {
+ probe.InstrumentationInfo.ConfigurationHash = configPath.Hash
+ applyConfigUpdate(procInfo, probe)
+ }
+ }
+}
+
+func applyConfigUpdate(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) {
+ log.Tracef("Applying config update: %v", probe)
+ err := AnalyzeBinary(procInfo)
+ if err != nil {
+ log.Errorf("couldn't inspect binary: %v\n", err)
+ return
+ }
+
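+	// On compile or attach failure, retry once with parameter capture disabled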
+generateCompileAttach:
+ err = codegen.GenerateBPFParamsCode(procInfo, probe)
+ if err != nil {
+ log.Info("Couldn't generate BPF programs", err)
+ return
+ }
+
+ err = ebpf.CompileBPFProgram(procInfo, probe)
+ if err != nil {
+ log.Info("Couldn't compile BPF object", err)
+ if !probe.InstrumentationInfo.AttemptedRebuild {
+ log.Info("Removing parameters and attempting to rebuild BPF object", err)
+ probe.InstrumentationInfo.AttemptedRebuild = true
+ probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false
+ goto generateCompileAttach
+ }
+ return
+ }
+
+ err = ebpf.AttachBPFUprobe(procInfo, probe)
+ if err != nil {
+ log.Info("Couldn't load and attach bpf programs", err)
+ if !probe.InstrumentationInfo.AttemptedRebuild {
+ log.Info("Removing parameters and attempting to rebuild BPF object", err)
+ probe.InstrumentationInfo.AttemptedRebuild = true
+ probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false
+ goto generateCompileAttach
+ }
+ return
+ }
+}
+
+func newConfigProbe() *ditypes.Probe {
+ return &ditypes.Probe{
+ ID: ditypes.ConfigBPFProbeID,
+ FuncName: "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer.passProbeConfiguration",
+ InstrumentationInfo: &ditypes.InstrumentationInfo{
+ InstrumentationOptions: &ditypes.InstrumentationOptions{
+ ArgumentsMaxSize: 100000,
+ StringMaxSize: 30000,
+ MaxFieldCount: int(ditypes.MaxFieldCount),
+ MaxReferenceDepth: 8,
+ CaptureParameters: true,
+ },
+ },
+ RateLimiter: ratelimiter.NewSingleEventRateLimiter(0),
+ }
+}
diff --git a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go
new file mode 100644
index 0000000000000..03bc95335d409
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go
@@ -0,0 +1,642 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package diconfig
+
+import (
+ "cmp"
+ "debug/dwarf"
+ "debug/elf"
+ "fmt"
+ "io"
+ "reflect"
+ "slices"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/go-delve/delve/pkg/dwarf/godwarf"
+)
+
+func getTypeMap(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) {
+ return loadFunctionDefinitions(dwarfData, targetFunctions)
+}
+
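+// dwarfMap caches parsed DWARF data by binary path so repeated inspections
+// don't re-read the binary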
+var dwarfMap = make(map[string]*dwarf.Data)
+
+type seenTypeCounter struct {
+ parameter *ditypes.Parameter
+ count uint8
+}
+
+var seenTypes = make(map[string]*seenTypeCounter)
+
+func loadFunctionDefinitions(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) {
+ entryReader := dwarfData.Reader()
+ typeReader := dwarfData.Reader()
+ readingAFunction := false
+ var funcName string
+
+ var result = ditypes.TypeMap{
+ Functions: make(map[string][]ditypes.Parameter),
+ InlinedFunctions: make(map[uint64][]*dwarf.Entry),
+ }
+
+ var (
+ name string
+ typeFields *ditypes.Parameter
+ )
+
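+	// Iterate over all DWARF entries, collecting compile unit ranges, inlined
+	// subroutines, and the formal parameters of target functions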
+entryLoop:
+ for {
+ entry, err := entryReader.Next()
+ if err == io.EOF || entry == nil {
+ break
+ }
+
+ if entryIsEmpty(entry) {
+ readingAFunction = false
+ continue entryLoop
+ }
+
+ if entry.Tag == dwarf.TagCompileUnit {
+
+ name, ok := entry.Val(dwarf.AttrName).(string)
+ if !ok {
+ continue entryLoop
+ }
+ ranges, err := dwarfData.Ranges(entry)
+ if err != nil {
+				log.Infof("couldn't retrieve ranges for compile unit %s: %s", name, err)
+ continue entryLoop
+ }
+
+ for i := range ranges {
+ result.DeclaredFiles = append(result.DeclaredFiles, &ditypes.LowPCEntry{
+ LowPC: ranges[i][0],
+ Entry: entry,
+ })
+ }
+ }
+
+ if entry.Tag == dwarf.TagInlinedSubroutine {
+			// This is an inlined function
+			for i := range entry.Field {
+				// Find its high program counter (where it exits in the parent routine)
+ if entry.Field[i].Attr == dwarf.AttrHighpc {
+
+ // The field for HighPC can be a constant or address, which are int64 and uint64 respectively
+ if entry.Field[i].Class == dwarf.ClassConstant {
+ result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))] =
+ append([]*dwarf.Entry{entry}, result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))]...)
+ } else if entry.Field[i].Class == dwarf.ClassAddress {
+ result.InlinedFunctions[entry.Field[i].Val.(uint64)] =
+ append([]*dwarf.Entry{entry}, result.InlinedFunctions[entry.Field[i].Val.(uint64)]...)
+ }
+ }
+ }
+ continue entryLoop
+ }
+
+ if entry.Tag == dwarf.TagSubprogram {
+
+ for _, field := range entry.Field {
+ if field.Attr == dwarf.AttrLowpc {
+ lowpc := field.Val.(uint64)
+ result.FunctionsByPC = append(result.FunctionsByPC, &ditypes.LowPCEntry{LowPC: lowpc, Entry: entry})
+ }
+ }
+
+ for _, field := range entry.Field {
+ if field.Attr == dwarf.AttrName {
+ funcName = field.Val.(string)
+ if !targetFunctions[funcName] {
+ continue entryLoop
+ }
+ result.Functions[funcName] = make([]ditypes.Parameter, 0)
+ readingAFunction = true
+ continue entryLoop
+ }
+ }
+ }
+
+ if !readingAFunction {
+ continue
+ }
+
+ if entry.Tag != dwarf.TagFormalParameter {
+ readingAFunction = false
+ continue entryLoop
+ }
+
+		// This branch should only be reached if we're currently reading parameters of a function
+		// Meaning: this is a formal parameter entry, and readingAFunction = true
+
+ // Go through fields of the entry collecting type, name, size information
+ for i := range entry.Field {
+
+			// Parameter name
+ if entry.Field[i].Attr == dwarf.AttrName {
+ name = entry.Field[i].Val.(string)
+ }
+
+			// Collect information about the type of this parameter
+ if entry.Field[i].Attr == dwarf.AttrType {
+
+ typeReader.Seek(entry.Field[i].Val.(dwarf.Offset))
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ typeFields, err = expandTypeData(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, fmt.Errorf("error while parsing debug information: %w", err)
+ }
+
+ }
+ }
+
+ typeFields.Name = name
+
+		// We've collected information about this parameter; append it to the slice of parameters for this function
+ result.Functions[funcName] = append(result.Functions[funcName], *typeFields)
+ seenTypes = make(map[string]*seenTypeCounter) // reset seen types map for next parameter
+ }
+
+ // Sort program counter slice for lookup when resolving pcs->functions
+ slices.SortFunc(result.FunctionsByPC, func(a, b *ditypes.LowPCEntry) int {
+ return cmp.Compare(b.LowPC, a.LowPC)
+ })
+ slices.SortFunc(result.DeclaredFiles, func(a, b *ditypes.LowPCEntry) int {
+ return cmp.Compare(b.LowPC, a.LowPC)
+ })
+
+ return &result, nil
+}
+
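+// loadDWARF returns the DWARF debug information for the binary at the given
+// path, using dwarfMap as a per-binary cache so the data is only parsed once.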
+func loadDWARF(binaryPath string) (*dwarf.Data, error) {
+ if dwarfData, ok := dwarfMap[binaryPath]; ok {
+ return dwarfData, nil
+ }
+ elfFile, err := elf.Open(binaryPath)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't open elf binary: %w", err)
+ }
+
+ dwarfData, err := elfFile.DWARF()
+ if err != nil {
+ return nil, fmt.Errorf("couldn't retrieve debug info from elf: %w", err)
+ }
+ dwarfMap[binaryPath] = dwarfData
+ return dwarfData, nil
+}
+
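+// expandTypeData recursively builds a ditypes.Parameter tree for the type
+// entry at the given DWARF offset, resolving typedefs and descending into
+// slices, structs, arrays, and pointers. The package-level seenTypes map
+// bounds the recursion depth for self-referential types.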
+func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parameter, error) {
+ typeReader := dwarfData.Reader()
+
+ typeReader.Seek(offset)
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not get type entry: %w", err)
+ }
+
+ if !entryTypeIsSupported(typeEntry) {
+ return resolveUnsupportedEntry(typeEntry), nil
+ }
+
+ if typeEntry.Tag == dwarf.TagTypedef {
+ typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ typeName, typeSize, typeKind := getTypeEntryBasicInfo(typeEntry)
+ typeHeader := ditypes.Parameter{
+ Type: typeName,
+ TotalSize: typeSize,
+ Kind: typeKind,
+ }
+
+ v, typeParsedAlready := seenTypes[typeHeader.Type]
+ if typeParsedAlready {
+ v.count++
+ if v.count >= ditypes.MaxReferenceDepth {
+ return v.parameter, nil
+ }
+ } else {
+ seenTypes[typeHeader.Type] = &seenTypeCounter{
+ parameter: &typeHeader,
+ count: 1,
+ }
+ }
+
+ if typeKind == uint(reflect.Slice) {
+ sliceElements, err := getSliceField(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, fmt.Errorf("could not collect fields of slice type: %w", err)
+ }
+ typeHeader = sliceElements[0]
+ } else if typeEntry.Tag == dwarf.TagStructType && typeName != "string" {
+ structFields, err := getStructFields(typeEntry.Offset, dwarfData)
+ if err != nil {
+			return nil, fmt.Errorf("could not collect fields of struct type: %w", err)
+ }
+ typeHeader.ParameterPieces = structFields
+ } else if typeEntry.Tag == dwarf.TagArrayType {
+ arrayElements, err := getIndividualArrayElements(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, fmt.Errorf("could not get length of array: %w", err)
+ }
+ typeHeader.ParameterPieces = arrayElements
+ } else if typeEntry.Tag == dwarf.TagPointerType {
+ pointerElements, err := getPointerLayers(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, fmt.Errorf("could not find pointer type: %w", err)
+ }
+ typeHeader.ParameterPieces = pointerElements
+ }
+
+ return &typeHeader, nil
+}
+
+// getSliceField returns the representation of a slice as a []ditypes.Parameter. The returned
+// slice will have only one element.
+//
+// Slices are represented internally in Go as a struct with 3 fields: the pointer to
+// the underlying array, the array length, and the array capacity.
+func getSliceField(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) {
+ typeReader := dwarfData.Reader()
+
+ typeReader.Seek(offset)
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not get slice type entry: %w", err)
+ }
+
+ elementTypeName, elementTypeSize, elementTypeKind := getTypeEntryBasicInfo(typeEntry)
+ sliceParameter := ditypes.Parameter{
+ Type: elementTypeName,
+ TotalSize: elementTypeSize,
+ Kind: elementTypeKind,
+ }
+
+ arrayEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not get slice type entry: %w", err)
+ }
+
+ for i := range arrayEntry.Field {
+ if arrayEntry.Field[i].Attr == dwarf.AttrType {
+ typeReader.Seek(arrayEntry.Field[i].Val.(dwarf.Offset))
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, err
+ }
+ underlyingType, err := expandTypeData(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, err
+ }
+ sliceParameter.ParameterPieces = append(sliceParameter.ParameterPieces, underlyingType.ParameterPieces[0])
+ }
+ }
+ return []ditypes.Parameter{sliceParameter}, nil
+}
+
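+// getIndividualArrayElements reads the element type and length of the array
+// type at the given offset and returns one ditypes.Parameter per element,
+// each carrying a copy of the expanded element type information.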
+func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) {
+ savedArrayEntryOffset := offset
+ typeReader := dwarfData.Reader()
+
+ // Go to the entry of the array type to get the underlying type information
+ typeReader.Seek(offset)
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not get array type entry: %w", err)
+ }
+
+ var (
+ elementFields *ditypes.Parameter
+ elementTypeName string
+ elementTypeSize int64
+ elementTypeKind uint
+ )
+ underlyingType, err := followType(typeEntry, dwarfData.Reader())
+ if err != nil {
+ return nil, fmt.Errorf("could not get underlying array type's type entry: %w", err)
+ }
+ if !entryTypeIsSupported(underlyingType) {
+ elementFields = resolveUnsupportedEntry(underlyingType)
+ elementTypeName, elementTypeSize, elementTypeKind = getTypeEntryBasicInfo(underlyingType)
+ } else {
+ arrayElementTypeEntry, err := resolveTypedefToRealType(underlyingType, typeReader)
+ if err != nil {
+ return nil, err
+ }
+
+ elementFields, err = expandTypeData(arrayElementTypeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, err
+ }
+
+ elementTypeName, elementTypeSize, elementTypeKind = getTypeEntryBasicInfo(arrayElementTypeEntry)
+ }
+
+	// Seek back to the array entry so we can read the subrange entry that follows the type,
+	// which gives us the length of the array
+ typeReader.Seek(savedArrayEntryOffset)
+ _, err = typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not find array entry: %w", err)
+ }
+ subrangeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("could not get length of array: %w", err)
+ }
+
+ var arrayLength int64
+ for h := range subrangeEntry.Field {
+ if subrangeEntry.Field[h].Attr == dwarf.AttrCount {
+ arrayLength = subrangeEntry.Field[h].Val.(int64)
+ }
+ }
+
+ arrayElements := []ditypes.Parameter{}
+ for h := 0; h < int(arrayLength); h++ {
+ newParam := ditypes.Parameter{}
+ copyTree(&newParam.ParameterPieces, &elementFields.ParameterPieces)
+ newParam.Name = fmt.Sprintf("[%d]%s[%d]", arrayLength, elementTypeName, h)
+ newParam.Type = elementTypeName
+ newParam.Kind = elementTypeKind
+ newParam.TotalSize = elementTypeSize
+ arrayElements = append(arrayElements, newParam)
+ }
+
+ return arrayElements, nil
+}
+
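+// getStructFields walks the member entries that follow the struct definition
+// at the given offset, returning a ditypes.Parameter for each field and
+// expanding non-basic field types recursively.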
+func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) {
+ inOrderReader := dwarfData.Reader()
+ typeReader := dwarfData.Reader()
+
+ structFields := []ditypes.Parameter{}
+ fieldEntry := &dwarf.Entry{}
+
+ // Start at the entry of the definition of the struct
+ inOrderReader.Seek(offset)
+ _, err := inOrderReader.Next()
+ if err != nil {
+ return structFields, err
+ }
+
+ // From the struct entry in DWARF, traverse through subsequent DWARF entries
+ // which are fields of the struct
+ for {
+ fieldEntry, err = inOrderReader.Next()
+ if err != nil {
+ return []ditypes.Parameter{}, err
+ }
+
+ if entryIsEmpty(fieldEntry) || fieldEntry.Tag != dwarf.TagMember {
+ break
+ }
+
+ newStructField := ditypes.Parameter{}
+
+ for i := range fieldEntry.Field {
+
+ // Struct Field Name
+ if fieldEntry.Field[i].Attr == dwarf.AttrName {
+ newStructField.Name = fieldEntry.Field[i].Val.(string)
+ }
+
+ // Struct Field Type
+ if fieldEntry.Field[i].Attr == dwarf.AttrType {
+ typeReader.Seek(fieldEntry.Field[i].Val.(dwarf.Offset))
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return []ditypes.Parameter{}, err
+ }
+
+ if !entryTypeIsSupported(typeEntry) {
+ unsupportedType := resolveUnsupportedEntry(typeEntry)
+ structFields = append(structFields, *unsupportedType)
+ continue
+ }
+
+ if typeEntry.Tag == dwarf.TagTypedef {
+ typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader)
+ if err != nil {
+ return []ditypes.Parameter{}, err
+ }
+ }
+
+ newStructField.Type, newStructField.TotalSize, newStructField.Kind = getTypeEntryBasicInfo(typeEntry)
+ if typeEntry.Tag != dwarf.TagBaseType {
+ field, err := expandTypeData(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return []ditypes.Parameter{}, err
+ }
+ field.Name = newStructField.Name
+ structFields = append(structFields, *field)
+ } else {
+ structFields = append(structFields, newStructField)
+ }
+ }
+ }
+ }
+ return structFields, nil
+}
+
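+// getPointerLayers expands the type that the pointer entry at the given
+// offset points to, returning it as a single-element slice (or an empty
+// slice if the pointee could not be resolved).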
+func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) {
+ typeReader := dwarfData.Reader()
+ typeReader.Seek(offset)
+ pointerEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, err
+ }
+ var underlyingType *ditypes.Parameter
+ for i := range pointerEntry.Field {
+
+ if pointerEntry.Field[i].Attr == dwarf.AttrType {
+ typeReader.Seek(pointerEntry.Field[i].Val.(dwarf.Offset))
+ typeEntry, err := typeReader.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ underlyingType, err = expandTypeData(typeEntry.Offset, dwarfData)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if underlyingType == nil {
+ return []ditypes.Parameter{}, nil
+ }
+ return []ditypes.Parameter{*underlyingType}, nil
+}
+
+// entryIsEmpty reports whether the entry is the NULL/empty entry that terminates entry trees.
+// We could rely on the `Children` field, but entry trees also always end with such an entry.
+func entryIsEmpty(e *dwarf.Entry) bool {
+ return !e.Children &&
+ len(e.Field) == 0 &&
+ e.Offset == 0 &&
+ e.Tag == dwarf.Tag(0)
+}
+
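+// getTypeEntryBasicInfo extracts the name, byte size, and Go kind of the
+// given DWARF type entry. Pointers get a fixed size of 8 bytes, and a
+// missing Go kind is inferred from the DWARF tag where possible.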
+func getTypeEntryBasicInfo(typeEntry *dwarf.Entry) (typeName string, typeSize int64, typeKind uint) {
+ if typeEntry.Tag == dwarf.TagPointerType {
+ typeSize = 8 // On 64 bit, all pointers are 8 bytes
+ }
+ for i := range typeEntry.Field {
+ if typeEntry.Field[i].Attr == dwarf.AttrName {
+ typeName = typeEntry.Field[i].Val.(string)
+ }
+ if typeEntry.Field[i].Attr == dwarf.AttrByteSize {
+ typeSize = typeEntry.Field[i].Val.(int64)
+ }
+ if typeEntry.Field[i].Attr == godwarf.AttrGoKind {
+ typeKind = uint(typeEntry.Field[i].Val.(int64))
+ if typeKind == 0 {
+ // Temporary fix for bug: https://github.com/golang/go/issues/64231
+ switch typeEntry.Tag {
+ case dwarf.TagStructType:
+ typeKind = uint(reflect.Struct)
+ case dwarf.TagArrayType:
+ typeKind = uint(reflect.Array)
+ case dwarf.TagPointerType:
+ typeKind = uint(reflect.Pointer)
+ default:
+ log.Info("Unexpected AttrGoKind == 0 for", typeEntry.Tag)
+ }
+ }
+ }
+ }
+ return
+}
+
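+// followType follows the type attribute of the given entry and returns the
+// entry it references; entries without a type attribute are returned as-is.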
+func followType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) {
+ for i := range outerType.Field {
+ if outerType.Field[i].Attr == dwarf.AttrType {
+ reader.Seek(outerType.Field[i].Val.(dwarf.Offset))
+ nextType, err := reader.Next()
+ if err != nil {
+ return nil, fmt.Errorf("error while retrieving underlying type: %w", err)
+ }
+ return nextType, nil
+ }
+ }
+ return outerType, nil
+}
+
+// resolveTypedefToRealType is used to get the underlying type of fields/variables/parameters when
+// Go packages the type underneath a typedef DWARF entry. The typedef DWARF entry has a 'type' field
+// which points to the actual type, which is what this function 'resolves'.
+// Typedefs are used for structs, pointers, maps, and likely other types.
+func resolveTypedefToRealType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) {
+
+ if outerType.Tag == dwarf.TagTypedef {
+ followedType, err := followType(outerType, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ if followedType.Tag == dwarf.TagTypedef {
+ return resolveTypedefToRealType(followedType, reader)
+ }
+ return followedType, nil
+ }
+
+ return outerType, nil
+}
+
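+// correctStructSizes applies correctStructSize to every parameter in the slice.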
+func correctStructSizes(params []ditypes.Parameter) {
+ for i := range params {
+		correctStructSize(&params[i])
+ }
+}
+
+// correctStructSize sets the size of structs and arrays to their number of fields/elements
+func correctStructSize(param *ditypes.Parameter) {
+ if len(param.ParameterPieces) == 0 {
+ return
+ }
+ if param.Kind == uint(reflect.Struct) || param.Kind == uint(reflect.Array) {
+ param.TotalSize = int64(len(param.ParameterPieces))
+ }
+ for i := range param.ParameterPieces {
+		correctStructSize(&param.ParameterPieces[i])
+ }
+}
+
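+// copyTree deep-copies the parameter tree in src into dst, recursing through
+// each parameter's ParameterPieces.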
+func copyTree(dst, src *[]ditypes.Parameter) {
+ if dst == nil || src == nil || len(*src) == 0 {
+ return
+ }
+ *dst = make([]ditypes.Parameter, len(*src))
+ copy(*dst, *src)
+ for i := range *src {
+ copyTree(&((*dst)[i].ParameterPieces), &((*src)[i].ParameterPieces))
+ }
+}
+
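+// kindIsSupported reports whether dynamic instrumentation can capture values
+// of the given kind; maps, channels, and unsafe pointers are not supported.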
+func kindIsSupported(k reflect.Kind) bool {
+ if k == reflect.Map ||
+ k == reflect.UnsafePointer ||
+ k == reflect.Chan {
+ return false
+ }
+ return true
+}
+
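+// typeIsSupported reports whether the named type can be captured;
+// unsafe.Pointer is the only excluded type name.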
+func typeIsSupported(t string) bool {
+ return t != "unsafe.Pointer"
+}
+
+func entryTypeIsSupported(e *dwarf.Entry) bool {
+ for f := range e.Field {
+
+ if e.Field[f].Attr == godwarf.AttrGoKind {
+ kindOfTypeEntry := reflect.Kind(e.Field[f].Val.(int64))
+ if !kindIsSupported(kindOfTypeEntry) {
+ return false
+ }
+ }
+
+ if e.Field[f].Attr == dwarf.AttrName {
+ if !typeIsSupported(e.Field[f].Val.(string)) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
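+// resolveUnsupportedEntry builds a placeholder ditypes.Parameter for a type
+// that can't be captured, recording its Go kind and marking it with the
+// Unsupported reason.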
+func resolveUnsupportedEntry(e *dwarf.Entry) *ditypes.Parameter {
+ var (
+ kind uint
+ name string
+ )
+ for f := range e.Field {
+ if e.Field[f].Attr == godwarf.AttrGoKind {
+ kind = uint(e.Field[f].Val.(int64))
+ }
+ if e.Field[f].Attr == dwarf.AttrName {
+ name = e.Field[f].Val.(string)
+ }
+ }
+ if name == "unsafe.Pointer" {
+ // The DWARF entry for unsafe.Pointer doesn't have a `kind` field
+ kind = uint(reflect.UnsafePointer)
+ }
+ return &ditypes.Parameter{
+ Type: fmt.Sprintf("unsupported-%s", reflect.Kind(kind).String()),
+ Kind: kind,
+ NotCaptureReason: ditypes.Unsupported,
+ }
+}
diff --git a/pkg/dynamicinstrumentation/diconfig/file_config_manager.go b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go
new file mode 100644
index 0000000000000..3f495ee97c4e4
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go
@@ -0,0 +1,230 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package diconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/util"
+)
+
+// FileWatchingConfigManager is used to track updates to a specified file
+// which contains probe configurations
+type FileWatchingConfigManager struct {
+ sync.Mutex
+ configTracker *configTracker
+ procTracker *proctracker.ProcessTracker
+
+ callback configUpdateCallback
+ configs configsByService
+ state ditypes.DIProcs
+}
+
+type fileConfigCallback func(configsByService)
+
+type configsByService = map[ditypes.ServiceName]map[ditypes.ProbeID]rcConfig
+
+// NewFileConfigManager creates a FileWatchingConfigManager set up to track
+// the specified file.
+func NewFileConfigManager(configFile string) (*FileWatchingConfigManager, error) {
+ cm := &FileWatchingConfigManager{
+ callback: applyConfigUpdate,
+ }
+
+ cm.procTracker = proctracker.NewProcessTracker(cm.updateProcessInfo)
+ err := cm.procTracker.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ cm.configTracker = newFileWatchingConfigTracker(configFile, cm.updateServiceConfigs)
+ err = cm.configTracker.Start()
+ if err != nil {
+ return nil, err
+ }
+ return cm, nil
+}
+
+// GetProcInfos returns the state of the FileWatchingConfigManager
+func (cm *FileWatchingConfigManager) GetProcInfos() ditypes.DIProcs {
+ return cm.state
+}
+
+// Stop closes the config and proc trackers used by the FileWatchingConfigManager
+func (cm *FileWatchingConfigManager) Stop() {
+ cm.configTracker.Stop()
+ cm.procTracker.Stop()
+}
+
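+// newFileWatchingConfigTracker builds a configTracker that watches the given
+// file and invokes onConfigUpdate with each parsed configuration.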
+func newFileWatchingConfigTracker(configFile string, onConfigUpdate fileConfigCallback) *configTracker {
+ ct := configTracker{
+ ConfigPath: configFile,
+ configCallback: onConfigUpdate,
+ stopChannel: make(chan bool),
+ }
+
+ return &ct
+}
+
+// updateServiceConfigs correlates the new configuration with running services
+// and operates on the new global state of services/configs via cm.callback.
+func (cm *FileWatchingConfigManager) updateServiceConfigs(configs configsByService) {
+ log.Info("Updating config from file:", configs)
+ cm.configs = configs
+ err := cm.update()
+ if err != nil {
+ log.Info(err)
+ }
+}
+
+func (cm *FileWatchingConfigManager) updateProcessInfo(procs ditypes.DIProcs) {
+ cm.Lock()
+ defer cm.Unlock()
+ log.Info("Updating procs", procs)
+ cm.configTracker.UpdateProcesses(procs)
+ err := cm.update()
+ if err != nil {
+ log.Info(err)
+ }
+}
+
+type configTracker struct {
+ Processes map[ditypes.PID]*ditypes.ProcessInfo
+ ConfigPath string
+ configCallback fileConfigCallback
+ stopChannel chan bool
+}
+
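+// Start begins watching the config file; every raw update is parsed as JSON
+// into per-service probe configurations and handed to the config callback
+// until Stop is called.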
+func (ct *configTracker) Start() error {
+ fw := util.NewFileWatcher(ct.ConfigPath)
+ updateChan, err := fw.Watch()
+ if err != nil {
+ return fmt.Errorf("failed to watch config file %s: %s", ct.ConfigPath, err)
+ }
+
+ go func(updateChan <-chan []byte) {
+ configUpdateLoop:
+ for {
+ select {
+ case rawConfigBytes := <-updateChan:
+ conf := map[string]map[string]rcConfig{}
+ err = json.Unmarshal(rawConfigBytes, &conf)
+ if err != nil {
+ log.Infof("invalid config read from %s: %s", ct.ConfigPath, err)
+ continue
+ }
+ ct.configCallback(conf)
+ case <-ct.stopChannel:
+ break configUpdateLoop
+ }
+ }
+ }(updateChan)
+ return nil
+}
+
+func (ct *configTracker) Stop() {
+ ct.stopChannel <- true
+}
+
+// UpdateProcesses is the callback that the config tracker uses to consume the map of ProcessInfo
+// values whenever the set of known service processes on the machine changes.
+// It simply overwrites the previous state of known service processes with the new one.
+func (ct *configTracker) UpdateProcesses(procs ditypes.DIProcs) {
+ current := procs
+ old := ct.Processes
+ if !reflect.DeepEqual(current, old) {
+ ct.Processes = current
+ }
+}
+
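+// update recomputes the desired instrumentation state from the current
+// configs and tracked processes, then reconciles it with the previous state:
+// probes of dead processes are closed, new processes get all of their probes
+// installed, and added, removed, or changed probes on existing processes are
+// applied via cm.callback.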
+func (cm *FileWatchingConfigManager) update() error {
+ var updatedState = ditypes.NewDIProcs()
+ for serviceName, configsByID := range cm.configs {
+ for pid, proc := range cm.configTracker.Processes {
+ // If a config exists relevant to this proc
+ if proc.ServiceName == serviceName {
+ procCopy := *proc
+ updatedState[pid] = &procCopy
+ updatedState[pid].ProbesByID = convert(serviceName, configsByID)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(cm.state, updatedState) {
+ err := inspectGoBinaries(updatedState)
+ if err != nil {
+ return err
+ }
+
+ for pid, procInfo := range cm.state {
+ // cleanup dead procs
+ if _, running := updatedState[pid]; !running {
+ procInfo.CloseAllUprobeLinks()
+ delete(cm.state, pid)
+ }
+ }
+
+ for pid, procInfo := range updatedState {
+ if _, tracked := cm.state[pid]; !tracked {
+ for _, probe := range procInfo.GetProbes() {
+ // install all probes from new process
+ cm.callback(procInfo, probe)
+ }
+ } else {
+ for _, existingProbe := range cm.state[pid].GetProbes() {
+ updatedProbe := procInfo.GetProbe(existingProbe.ID)
+ if updatedProbe == nil {
+ // delete old probes
+ cm.state[pid].DeleteProbe(existingProbe.ID)
+ }
+ }
+ for _, updatedProbe := range procInfo.GetProbes() {
+ existingProbe := cm.state[pid].GetProbe(updatedProbe.ID)
+ if !reflect.DeepEqual(existingProbe, updatedProbe) {
+ // update existing probes that changed
+ cm.callback(procInfo, updatedProbe)
+ }
+ }
+ }
+ }
+ cm.state = updatedState
+ }
+ return nil
+}
+
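+// convert maps each rcConfig to a ditypes.Probe for the given service.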
+func convert(service string, configsByID map[ditypes.ProbeID]rcConfig) map[ditypes.ProbeID]*ditypes.Probe {
+ probesByID := map[ditypes.ProbeID]*ditypes.Probe{}
+ for id, config := range configsByID {
+ probesByID[id] = config.toProbe(service)
+ }
+ return probesByID
+}
+
+func (rc *rcConfig) toProbe(service string) *ditypes.Probe {
+ return &ditypes.Probe{
+ ID: rc.ID,
+ ServiceName: service,
+ FuncName: fmt.Sprintf("%s.%s", rc.Where.TypeName, rc.Where.MethodName),
+ InstrumentationInfo: &ditypes.InstrumentationInfo{
+ InstrumentationOptions: &ditypes.InstrumentationOptions{
+ CaptureParameters: ditypes.CaptureParameters,
+ ArgumentsMaxSize: ditypes.ArgumentsMaxSize,
+ StringMaxSize: ditypes.StringMaxSize,
+ MaxReferenceDepth: rc.Capture.MaxReferenceDepth,
+ },
+ },
+ }
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/analysis.go b/pkg/dynamicinstrumentation/ditypes/analysis.go
new file mode 100644
index 0000000000000..e10ab9657c53e
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/analysis.go
@@ -0,0 +1,105 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+import (
+ "debug/dwarf"
+ "fmt"
+)
+
+// TypeMap contains all the information about functions and their parameters including
+// functions that have been inlined in the binary
+type TypeMap struct {
+	// Functions maps fully-qualified function names to slices of their parameters
+	Functions map[string][]Parameter
+
+	// InlinedFunctions maps program counters to slices of DWARF entries used
+	// when resolving stack traces that include inlined functions
+	InlinedFunctions map[uint64][]*dwarf.Entry
+
+	// FunctionsByPC orders DWARF subprogram (function) entries by their
+	// low program counters, which is necessary for resolving stack traces
+	FunctionsByPC []*LowPCEntry
+
+	// DeclaredFiles orders DWARF compile unit entries by their low program
+	// counters, which is necessary for resolving the declared file when
+	// building stack traces
+ DeclaredFiles []*LowPCEntry
+}
+
+// Parameter represents a function parameter as read from DWARF info
+type Parameter struct {
+ Name string
+ ID string
+ Type string
+ TotalSize int64
+ Kind uint
+ Location Location
+ NotCaptureReason NotCaptureReason
+ ParameterPieces []Parameter
+}
+
+func (p Parameter) String() string {
+ return fmt.Sprintf("%s %s", p.Name, p.Type)
+}
+
+// NotCaptureReason is used to convey why a parameter was not captured
+type NotCaptureReason uint8
+
+const (
+ Unsupported NotCaptureReason = iota + 1 // Unsupported means the data type of the parameter is unsupported
+ FieldLimitReached // FieldLimitReached means the parameter wasn't captured because the data type has too many fields
+	CaptureDepthReached // CaptureDepthReached means the parameter wasn't captured because the data type has too many levels
+)
+
+// SpecialKind is used for clarity in generated events that certain fields weren't read
+type SpecialKind uint8
+
+const (
+ KindUnsupported = 255 - iota // KindUnsupported is for unsupported types
+ KindCutFieldLimit // KindCutFieldLimit is for fields that were cut because of field limit
+ KindCaptureDepthReached // KindCaptureDepthReached is for fields that were cut because of depth limit
+)
+
+func (s SpecialKind) String() string {
+ switch s {
+ case KindUnsupported:
+ return "Unsupported"
+ case KindCutFieldLimit:
+ return "CutFieldLimit"
+ default:
+ return fmt.Sprintf("%d", s)
+ }
+}
+
+// Location represents where a particular datatype is found on probe entry
+type Location struct {
+ InReg bool
+ StackOffset int64
+ Register int
+ NeedsDereference bool
+ PointerOffset uint64
+}
+
+func (l Location) String() string {
+ return fmt.Sprintf("Location{InReg: %t, StackOffset: %d, Register: %d}", l.InReg, l.StackOffset, l.Register)
+}
+
+// LowPCEntry is a helper type used to sort DWARF entries by their low program counter
+type LowPCEntry struct {
+ LowPC uint64
+ Entry *dwarf.Entry
+}
+
+// BPFProgram represents a bpf program that's created for a single probe
+type BPFProgram struct {
+ ProgramText string
+
+ // Used for bpf code generation
+ Probe *Probe
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/config.go b/pkg/dynamicinstrumentation/ditypes/config.go
new file mode 100644
index 0000000000000..06c0f826b33b7
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/config.go
@@ -0,0 +1,337 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package ditypes contains various datatypes and other shared components
+// used by all the packages in dynamic instrumentation
+package ditypes
+
+import (
+ "debug/dwarf"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/link"
+ "github.com/google/uuid"
+)
+
+const ConfigBPFProbeID = "config" // ConfigBPFProbeID is the ID used for the config bpf program
+
+var (
+ CaptureParameters = true // CaptureParameters is the default value for if probes should capture parameter values
+ ArgumentsMaxSize = 10000 // ArgumentsMaxSize is the default size in bytes of the output buffer used for param values
+ StringMaxSize = 512 // StringMaxSize is the default size in bytes of a single string
+ MaxReferenceDepth uint8 = 4 // MaxReferenceDepth is the default depth that DI will traverse datatypes for capturing values
+ MaxFieldCount = 20 // MaxFieldCount is the default limit for how many fields DI will capture in a single data type
+ SliceMaxSize = 1800 // SliceMaxSize is the default limit in bytes of a slice
+ SliceMaxLength = 100 // SliceMaxLength is the default limit in number of elements of a slice
+)
+
+// ProbeID is the unique identifier for probes
+type ProbeID = string
+
+// ServiceName is the unique identifier for a service
+type ServiceName = string
+
+// PID stands for process ID
+type PID = uint32
+
+// DIProcs is the map that dynamic instrumentation uses for tracking processes and their relevant instrumentation info
+type DIProcs map[PID]*ProcessInfo
+
+// NewDIProcs creates a new DIProcs map
+func NewDIProcs() DIProcs {
+ return DIProcs{}
+}
+
+// GetProbes returns the relevant probes information for a specific process
+func (procs DIProcs) GetProbes(pid PID) []*Probe {
+ procInfo, ok := procs[pid]
+ if !ok {
+ return nil
+ }
+ return procInfo.GetProbes()
+}
+
+// GetProbe returns the relevant probe information for a specific probe being instrumented
+// in a specific process
+func (procs DIProcs) GetProbe(pid PID, probeID ProbeID) *Probe {
+ procInfo, ok := procs[pid]
+ if !ok {
+ return nil
+ }
+ return procInfo.GetProbe(probeID)
+}
+
+// SetProbe associates instrumentation information with a probe for a specific process
+func (procs DIProcs) SetProbe(pid PID, service, typeName, method string, probeID, runtimeID uuid.UUID, opts *InstrumentationOptions) {
+ procInfo, ok := procs[pid]
+ if !ok {
+ return
+ }
+ probe := &Probe{
+ ID: probeID.String(),
+ ServiceName: service,
+ FuncName: fmt.Sprintf("%s.%s", typeName, method),
+ InstrumentationInfo: &InstrumentationInfo{InstrumentationOptions: opts},
+ }
+
+ procInfo.ProbesByID[probeID.String()] = probe
+ // TODO: remove this from here
+ procInfo.RuntimeID = runtimeID.String()
+}
+
+// DeleteProbe removes instrumentation for the specified probe
+// in the specified process
+func (procs DIProcs) DeleteProbe(pid PID, probeID ProbeID) {
+ procInfo, ok := procs[pid]
+ if !ok {
+ return
+ }
+ procInfo.DeleteProbe(probeID)
+}
+
+// CloseUprobe closes the uprobe link for the specified probe (by ID) of
+// the specified process (by PID)
+func (procs DIProcs) CloseUprobe(pid PID, probeID ProbeID) {
+ probe := procs.GetProbe(pid, probeID)
+ if probe == nil {
+ return
+ }
+ proc, ok := procs[pid]
+ if !ok || proc == nil {
+		log.Info("could not close uprobe, pid not found")
+		return
+	}
+ err := proc.CloseUprobeLink(probeID)
+ if err != nil {
+ log.Infof("could not close uprobe: %s", err)
+ }
+}
+
+// SetRuntimeID sets the runtime ID for the specified process
+func (procs DIProcs) SetRuntimeID(pid PID, runtimeID string) {
+ proc, ok := procs[pid]
+ if !ok || proc == nil {
+		log.Info("could not set runtime ID, pid not found")
+		return
+	}
+ proc.RuntimeID = runtimeID
+}
+
+// ProcessInfo represents a process, it contains the information relevant to
+// dynamic instrumentation for this specific process
+type ProcessInfo struct {
+ PID uint32
+ ServiceName string
+ RuntimeID string
+ BinaryPath string
+
+ TypeMap *TypeMap
+ DwarfData *dwarf.Data
+
+ ConfigurationUprobe *link.Link
+ ProbesByID ProbesByID
+ InstrumentationUprobes map[ProbeID]*link.Link
+ InstrumentationObjects map[ProbeID]*ebpf.Collection
+}
+
+// SetupConfigUprobe sets the configuration probe for the process
+func (pi *ProcessInfo) SetupConfigUprobe() (*ebpf.Map, error) {
+ configProbe, ok := pi.ProbesByID[ConfigBPFProbeID]
+ if !ok {
+ return nil, fmt.Errorf("config probe was not set for process %s", pi.ServiceName)
+ }
+
+ configLink, ok := pi.InstrumentationUprobes[ConfigBPFProbeID]
+ if !ok {
+ return nil, fmt.Errorf("config uprobe was not set for process %s", pi.ServiceName)
+ }
+ pi.ConfigurationUprobe = configLink
+ delete(pi.InstrumentationUprobes, ConfigBPFProbeID)
+
+ m, ok := pi.InstrumentationObjects[configProbe.ID].Maps["events"]
+ if !ok {
+ return nil, fmt.Errorf("config ringbuffer was not set for process %s", pi.ServiceName)
+ }
+ return m, nil
+}
+
+// CloseConfigUprobe closes the uprobe connection for the configuration probe
+func (pi *ProcessInfo) CloseConfigUprobe() error {
+ if pi.ConfigurationUprobe != nil {
+ return (*pi.ConfigurationUprobe).Close()
+ }
+ return nil
+}
+
+// SetUprobeLink associates the uprobe link with the specified probe
+// in the tracked process
+func (pi *ProcessInfo) SetUprobeLink(probeID ProbeID, l *link.Link) {
+ pi.InstrumentationUprobes[probeID] = l
+}
+
+// CloseUprobeLink closes the probe and deletes the link for the probe
+// in the tracked process
+func (pi *ProcessInfo) CloseUprobeLink(probeID ProbeID) error {
+ if l, ok := pi.InstrumentationUprobes[probeID]; ok {
+ err := (*l).Close()
+ delete(pi.InstrumentationUprobes, probeID)
+ return err
+ }
+ return nil
+}
+
+// CloseAllUprobeLinks closes all probes and deletes their links for all probes
+// in the tracked process
+func (pi *ProcessInfo) CloseAllUprobeLinks() {
+ for probeID := range pi.InstrumentationUprobes {
+ if err := pi.CloseUprobeLink(probeID); err != nil {
+ log.Info("Failed to close uprobe link for probe", pi.BinaryPath, pi.PID, probeID, err)
+ }
+ }
+ err := pi.CloseConfigUprobe()
+ if err != nil {
+ log.Info("Failed to close config uprobe for process", pi.BinaryPath, pi.PID, err)
+ }
+}
+
+// GetProbes returns references to each probe in the associated process
+func (pi *ProcessInfo) GetProbes() []*Probe {
+ probes := make([]*Probe, 0, len(pi.ProbesByID))
+ for _, probe := range pi.ProbesByID {
+ probes = append(probes, probe)
+ }
+ return probes
+}
+
+// GetProbe returns a reference to the specified probe in the associated process
+func (pi *ProcessInfo) GetProbe(probeID ProbeID) *Probe {
+ return pi.ProbesByID[probeID]
+}
+
+// DeleteProbe closes the uprobe link and disassociates the probe in the associated process
+func (pi *ProcessInfo) DeleteProbe(probeID ProbeID) {
+ err := pi.CloseUprobeLink(probeID)
+ if err != nil {
+ log.Infof("could not close uprobe link: %s", err)
+ }
+ delete(pi.ProbesByID, probeID)
+}
+
+// ProbesByID maps probe IDs to probes
+type ProbesByID = map[ProbeID]*Probe
+
+// FieldIdentifier is a tuple of struct names and field names
+type FieldIdentifier struct {
+ StructName, FieldName string
+}
+
+// InstrumentationInfo contains information used while setting up probes
+type InstrumentationInfo struct {
+ InstrumentationOptions *InstrumentationOptions
+
+ // BPFParametersSourceCode is the source code needed for capturing parameters via this probe
+ BPFParametersSourceCode string
+
+ // BPFSourceCode is the source code of the BPF program attached via this probe
+ BPFSourceCode string
+
+ // BPFObjectFileReader is the compiled BPF program attached via this probe
+ BPFObjectFileReader io.ReaderAt
+
+ ConfigurationHash string
+
+ // Toggle for whether or not the BPF object was rebuilt after changing parameters
+ AttemptedRebuild bool
+}
+
+// InstrumentationOptions is a set of options for how data should be captured by probes
+type InstrumentationOptions struct {
+ CaptureParameters bool
+ ArgumentsMaxSize int
+ StringMaxSize int
+ MaxReferenceDepth int
+ MaxFieldCount int
+ SliceMaxSize int
+ SliceMaxLength int
+}
+
+// Probe represents a location in a GoProgram that can be instrumented
+// dynamically. It contains information about the service and the function
+// associated with the probe.
+type Probe struct {
+ // ID is a unique identifier for the probe.
+ ID string
+
+ // ServiceName is the name of the service in which the probe should be placed.
+ ServiceName string
+
+ // FuncName is the name of the function that triggers the probe.
+ FuncName string
+
+ InstrumentationInfo *InstrumentationInfo
+
+ RateLimiter *ratelimiter.SingleRateLimiter
+}
+
+// GetBPFFuncName cleans the function name to be allowed by the bpf compiler
+func (p *Probe) GetBPFFuncName() string {
+ // can't have '.', '-' or '/' in bpf program name
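+	// e.g. a FuncName of "main.handleRequest" becomes "main_handleRequest"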
+ replacer := strings.NewReplacer(".", "_", "/", "_", "-", "_")
+ return replacer.Replace(p.FuncName)
+}
+
+// ConfigPath is a remote-config specific representation which is used for retrieving probe definitions
+type ConfigPath struct {
+ OrgID int64
+ Product string
+ ProbeType string
+ ProbeUUID uuid.UUID
+ Hash string
+}
+
+// ParseConfigPath takes the remote-config specific string and parses a ConfigPath object out of it.
+// The string is expected to have the form datadog/<orgID>/<product>/<probeType>_<probeUUID>/<hash>.
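+//
+// For example, "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/<hash>"
+// parses to OrgID 2, Product "LIVE_DEBUGGING", ProbeType "logProbe", and the given probe UUID and hash.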
+func ParseConfigPath(str string) (*ConfigPath, error) {
+ parts := strings.Split(str, "/")
+ if len(parts) != 5 {
+ return nil, fmt.Errorf("failed to parse config path %s", str)
+ }
+ orgIDStr, product, probeIDStr, hash := parts[1], parts[2], parts[3], parts[4]
+ orgID, err := strconv.ParseInt(orgIDStr, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse orgID %s (from %s)", orgIDStr, str)
+ }
+ if product != "LIVE_DEBUGGING" {
+ return nil, fmt.Errorf("product %s not supported (from %s)", product, str)
+ }
+
+ typeAndID := strings.Split(probeIDStr, "_")
+ if len(typeAndID) != 2 {
+ return nil, fmt.Errorf("failed to parse probe type and UUID %s (from %s)", probeIDStr, str)
+ }
+ probeType, probeUUIDStr := typeAndID[0], typeAndID[1]
+ if probeType != "logProbe" {
+ return nil, fmt.Errorf("probe type %s not supported (from %s)", probeType, str)
+ }
+ probeUUID, err := uuid.Parse(probeUUIDStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse probeUUID %s (from %s)", probeUUIDStr, str)
+ }
+
+ return &ConfigPath{
+ OrgID: orgID,
+ Product: product,
+ ProbeType: probeType,
+ ProbeUUID: probeUUID,
+ Hash: hash,
+ }, nil
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/config_test.go b/pkg/dynamicinstrumentation/ditypes/config_test.go
new file mode 100644
index 0000000000000..9fefba03063cd
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/config_test.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+import (
+ "testing"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseConfigPath(t *testing.T) {
+ expectedUUID, err := uuid.Parse("f0b49f3e-8364-448d-97e9-3e640c4a21e6")
+ assert.NoError(t, err)
+
+ configPath, err := ParseConfigPath("datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51")
+ assert.NoError(t, err)
+
+ assert.Equal(t, int64(2), configPath.OrgID)
+ assert.Equal(t, "LIVE_DEBUGGING", configPath.Product)
+ assert.Equal(t, "logProbe", configPath.ProbeType)
+ assert.Equal(t, expectedUUID, configPath.ProbeUUID)
+ assert.Equal(t, "51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", configPath.Hash)
+}
+
+func TestParseConfigPathErrors(t *testing.T) {
+ tcs := []string{
+ "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6",
+ "datadog/2/NOT_SUPPORTED/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51",
+ "datadog/2/LIVE_DEBUGGING/notSupported_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51",
+ "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51/extra",
+ "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-xxxx-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51",
+ }
+ for _, tc := range tcs {
+ _, err := ParseConfigPath(tc)
+ assert.Error(t, err)
+ }
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/diagnostics.go b/pkg/dynamicinstrumentation/ditypes/diagnostics.go
new file mode 100644
index 0000000000000..d28764057b9ba
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/diagnostics.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+// DiagnosticUpload is the message sent to the Datadog backend conveying diagnostic information
+type DiagnosticUpload struct {
+ Service string `json:"service"`
+ DDSource string `json:"ddsource"`
+
+ Debugger struct {
+ Diagnostic `json:"diagnostics"`
+ } `json:"debugger"`
+}
+
+// SetError sets the error in the diagnostic upload
+func (d *DiagnosticUpload) SetError(errorType, errorMessage string) {
+ d.Debugger.Diagnostic.Status = StatusError
+ d.Debugger.Diagnostic.DiagnosticException = &DiagnosticException{
+ Type: errorType,
+ Message: errorMessage,
+ }
+}
+
+// Status conveys the status of a probe
+type Status string
+
+const (
+ StatusReceived Status = "RECEIVED" // StatusReceived means the probe configuration was received
+ StatusInstalled Status = "INSTALLED" // StatusInstalled means the probe was installed
+ StatusEmitting Status = "EMITTING" // StatusEmitting means the probe is emitting events
+ StatusError Status = "ERROR" // StatusError means the probe has an issue
+)
+
+// Diagnostic contains fields relevant for conveying the status of a probe
+type Diagnostic struct {
+ RuntimeID string `json:"runtimeId"`
+ ProbeID string `json:"probeId"`
+ Status Status `json:"status"`
+
+ *DiagnosticException `json:"exception,omitempty"`
+}
+
+// DiagnosticException is used for diagnosing errors in probes
+type DiagnosticException struct {
+ Type string `json:"type"`
+ Message string `json:"message"`
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf.go b/pkg/dynamicinstrumentation/ditypes/ebpf.go
new file mode 100644
index 0000000000000..40bc53fdf9d28
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/ebpf.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build ignore
+
+package ditypes
+
+/*
+#include "../codegen/c/types.h"
+*/
+import "C"
+
+type BaseEvent C.struct_base_event
+
+const SizeofBaseEvent = C.sizeof_struct_base_event
diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go
new file mode 100644
index 0000000000000..d76d6c39c2105
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go
@@ -0,0 +1,13 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -I ../../network/ebpf/c -I ../../ebpf/c -fsigned-char ebpf.go
+
+package ditypes
+
+type BaseEvent struct {
+ Probe_id [304]byte
+ Pid uint32
+ Uid uint32
+ Program_counters [10]uint64
+}
+
+const SizeofBaseEvent = 0x188
diff --git a/pkg/dynamicinstrumentation/ditypes/ringbuffer.go b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go
new file mode 100644
index 0000000000000..3e42586dc499c
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+import "github.com/cilium/ebpf"
+
+// EventsRingbuffer is the shared ringbuffer which all bpf programs use for communication
+// with userspace
+var EventsRingbuffer *ebpf.Map
+
+// DIEvent represents a single invocation of a function and its captured information
+type DIEvent struct {
+ ProbeID string
+ PID uint32
+ UID uint32
+ Argdata []*Param
+ StackPCs []uint64
+}
+
+// Param is the representation of a single function parameter after being parsed from
+// the raw byte buffer sent from bpf
+type Param struct {
+ ValueStr string `json:",omitempty"`
+ Type string
+ Size uint16
+ Kind byte
+ Fields []*Param `json:",omitempty"`
+}
+
+// StackFrame represents a single entry in a stack trace
+type StackFrame struct {
+ FileName string `json:"fileName,omitempty"`
+ Function string `json:"function,omitempty"`
+ Line int `json:"lineNumber,omitempty"`
+}
+
+// EventCallback is the function that is called every time a new event is created
+type EventCallback func(*DIEvent)
diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot.go b/pkg/dynamicinstrumentation/ditypes/snapshot.go
new file mode 100644
index 0000000000000..44e7fcb35cdd2
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/snapshot.go
@@ -0,0 +1,118 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+import (
+ "github.com/google/uuid"
+)
+
+// SnapshotUpload is a single message sent to the Datadog backend containing the
+// snapshot and metadata
+type SnapshotUpload struct {
+ Service string `json:"service"`
+ Message string `json:"message"`
+ DDSource string `json:"ddsource"`
+ DDTags string `json:"ddtags"`
+ Logger struct {
+ Name string `json:"name"`
+ Method string `json:"method"`
+ Version int `json:"version,omitempty"`
+ ThreadID int `json:"thread_id,omitempty"`
+ ThreadName string `json:"thread_name,omitempty"`
+ } `json:"logger"`
+
+ Debugger struct {
+ Snapshot `json:"snapshot"`
+ } `json:"debugger"`
+
+ // TODO: check precision (ms, ns etc)
+ Duration int64 `json:"duration"`
+
+ DD *TraceCorrelation `json:"dd,omitempty"`
+}
+
+// Snapshot is a single instance of a function invocation and all
+// captured data
+type Snapshot struct {
+ ID *uuid.UUID `json:"id"`
+ Timestamp int64 `json:"timestamp"`
+
+ Language string `json:"language"`
+ ProbeInSnapshot `json:"probe"`
+
+ Captures `json:"captures"`
+
+ Errors []EvaluationError `json:"evaluationErrors,omitempty"`
+
+ Stack []StackFrame `json:"stack"`
+}
+
+// Captures contains captured data at various points during a function invocation
+type Captures struct {
+ Entry *Capture `json:"entry,omitempty"`
+ Return *Capture `json:"return,omitempty"`
+
+ Lines map[string]Capture `json:"lines,omitempty"`
+}
+
+// ProbeInSnapshot contains information about the probe that produced a snapshot
+type ProbeInSnapshot struct {
+ ID string `json:"id"`
+ EvaluateAt string `json:"evaluateAt,omitempty"`
+ Tags string `json:"tags,omitempty"`
+ Version int `json:"version,omitempty"`
+
+ ProbeLocation `json:"location"`
+}
+
+// ProbeLocation represents where a snapshot was originally captured
+type ProbeLocation struct {
+ Type string `json:"type,omitempty"`
+ Method string `json:"method,omitempty"`
+ Lines []string `json:"lines,omitempty"`
+ File string `json:"file,omitempty"`
+}
+
+// CapturedValueMap maps variable names to their captured values
+type CapturedValueMap = map[string]*CapturedValue
+
+// Capture represents all the captured values in a snapshot
+type Capture struct {
+ Arguments CapturedValueMap `json:"arguments,omitempty"`
+ Locals CapturedValueMap `json:"locals,omitempty"`
+}
+
+// CapturedValue represents the value of a captured type
+type CapturedValue struct {
+ Type string `json:"type"`
+
+ // we use a string pointer so the empty string is marshalled
+ Value *string `json:"value,omitempty"`
+
+ Fields map[string]*CapturedValue `json:"fields,omitempty"`
+ Entries [][]CapturedValue `json:"entries,omitempty"`
+ Elements []CapturedValue `json:"elements,omitempty"`
+
+ NotCapturedReason string `json:"notCapturedReason,omitempty"`
+ IsNull bool `json:"isNull,omitempty"`
+
+ Size string `json:"size,omitempty"`
+ Truncated bool `json:"truncated,omitempty"`
+}
+
+// EvaluationError expresses why a value could not be evaluated
+type EvaluationError struct {
+ Expr string `json:"expr"`
+ Message string `json:"message"`
+}
+
+// TraceCorrelation contains fields that correlate a snapshot with traces
+type TraceCorrelation struct {
+ TraceID string `json:"trace_id,omitempty"`
+ SpanID string `json:"span_id,omitempty"`
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot_test.go b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go
new file mode 100644
index 0000000000000..e4496f4c6dc92
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ditypes
+
+import (
+ "encoding/json"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDynamicInstrumentationLogJSONRoundTrip(t *testing.T) {
+ files := []string{
+ "testdata/snapshot-00.json",
+ "testdata/snapshot-01.json",
+ "testdata/snapshot-02.json",
+ }
+ for _, filePath := range files {
+ file, err := os.Open(filePath)
+ if err != nil {
+ t.Error(err)
+ }
+ defer file.Close()
+
+ bytes, err := io.ReadAll(file)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var s SnapshotUpload
+ err = json.Unmarshal(bytes, &s)
+ if err != nil {
+ t.Error(err)
+ }
+
+ mBytes, err := json.Marshal(s)
+ if err != nil {
+ t.Error(err)
+ }
+
+ assert.JSONEq(t, string(bytes), string(mBytes))
+ }
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json
new file mode 100644
index 0000000000000..e92603672b4c6
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json
@@ -0,0 +1,402 @@
+{
+ "service": "debugger-backend-api-monitor",
+ "ddsource": "dd_debugger",
+ "message": "Log probe executed successfully",
+ "duration": 763602,
+ "ddtags": "tag:value",
+ "logger": {
+ "thread_id": 91,
+ "method": "emitSnapshot",
+ "thread_name": "scheduled-executor-thread-16",
+ "name": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob",
+ "version": 2
+ },
+ "debugger": {
+ "snapshot": {
+ "stack": [
+ {
+ "fileName": "SnapshotReadAfterWriteMonitorJob.kt",
+ "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.emitSnapshot",
+ "lineNumber": 89
+ },
+ {
+ "fileName": "SnapshotReadAfterWriteMonitorJob.kt",
+ "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.access$emitSnapshot",
+ "lineNumber": 31
+ },
+ {
+ "fileName": "SnapshotReadAfterWriteMonitorJob.kt",
+ "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke",
+ "lineNumber": 63
+ },
+ {
+ "fileName": "SnapshotReadAfterWriteMonitorJob.kt",
+ "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke",
+ "lineNumber": 55
+ },
+ {
+ "fileName": "MonitoredTask.kt",
+ "function": "com.datadog.debugger.MonitoredTask$ExecutionState.run",
+ "lineNumber": 30
+ },
+ {
+ "fileName": "MonitoredTask.kt",
+ "function": "com.datadog.debugger.MonitoredTask.run",
+ "lineNumber": 89
+ },
+ {
+ "fileName": "SnapshotReadAfterWriteMonitorJob.kt",
+ "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.run",
+ "lineNumber": 55
+ },
+ {
+ "function": "com.datadog.debugger.apimonitor.$SnapshotReadAfterWriteMonitorJob$Definition$Exec.dispatch",
+ "lineNumber": -1
+ },
+ {
+ "fileName": "AbstractExecutableMethodsDefinition.java",
+ "function": "io.micronaut.context.AbstractExecutableMethodsDefinition$DispatchedExecutableMethod.invoke",
+ "lineNumber": 378
+ },
+ {
+ "fileName": "DelegatingExecutableMethod.java",
+ "function": "io.micronaut.inject.DelegatingExecutableMethod.invoke",
+ "lineNumber": 76
+ },
+ {
+ "fileName": "ScheduledMethodProcessor.java",
+ "function": "io.micronaut.scheduling.processor.ScheduledMethodProcessor.lambda$process$5",
+ "lineNumber": 127
+ },
+ {
+ "fileName": "Executors.java",
+ "function": "java.util.concurrent.Executors$RunnableAdapter.call",
+ "lineNumber": 577
+ },
+ {
+ "fileName": "FutureTask.java",
+ "function": "java.util.concurrent.FutureTask.runAndReset",
+ "lineNumber": 358
+ },
+ {
+ "fileName": "ScheduledThreadPoolExecutor.java",
+ "function": "java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run",
+ "lineNumber": 305
+ },
+ {
+ "fileName": "ThreadPoolExecutor.java",
+ "function": "java.util.concurrent.ThreadPoolExecutor.runWorker",
+ "lineNumber": 1144
+ },
+ {
+ "fileName": "ThreadPoolExecutor.java",
+ "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run",
+ "lineNumber": 642
+ },
+ {
+ "fileName": "Thread.java",
+ "function": "java.lang.Thread.run",
+ "lineNumber": 1589
+ }
+ ],
+ "captures": {
+ "entry": {
+ "arguments": {
+ "apiMonitorStr": {
+ "type": "java.lang.String",
+ "value": "red"
+ },
+ "this": {
+ "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob",
+ "fields": {
+ "apiClient": {
+ "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient",
+ "fields": {
+ "converter": {
+ "type": "com.datadog.debugger.monitor.utils.JsonApiConverter",
+ "fields": {
+ "mapper": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.ObjectMapper"
+ }
+ }
+ },
+ "rcApiClient": {
+ "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient",
+ "fields": {
+ "apiClient": {
+ "notCapturedReason": "depth",
+ "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted"
+ },
+ "mcnultyJobConfig": {
+ "notCapturedReason": "depth",
+ "type": "java.util.LinkedHashMap"
+ },
+ "secretManager": {
+ "notCapturedReason": "depth",
+ "type": "com.datadog.debugger.monitor.utils.VaultSecretManager"
+ }
+ }
+ }
+ }
+ },
+ "metrics": {
+ "type": "com.datadog.debugger.Metrics",
+ "fields": {
+ "statsd": {
+ "type": "com.timgroup.statsd.NonBlockingStatsDClient",
+ "fields": {
+ "clientChannel": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.UnixDatagramClientChannel"
+ },
+ "handler": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.NonBlockingStatsDClient$1"
+ },
+ "constantTagsRendered": {
+ "type": "java.lang.String",
+ "value": "|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0"
+ },
+ "statsDSender": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDSender"
+ },
+ "telemetryClientChannel": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.UnixDatagramClientChannel"
+ },
+ "prefix": {
+ "type": "java.lang.String",
+ "value": "dd.debugger_backend_api_monitor."
+ },
+ "telemetryStatsDProcessor": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDNonBlockingProcessor"
+ },
+ "blocking": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "statsDProcessor": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDNonBlockingProcessor"
+ },
+ "telemetryStatsDSender": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDSender"
+ },
+ "telemetry": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.Telemetry"
+ }
+ }
+ }
+ }
+ },
+ "jobConfiguration": {
+ "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration",
+ "fields": {
+ "pollInterval": {
+ "type": "java.time.Duration",
+ "value": "PT1S"
+ },
+ "orgId": {
+ "type": "long",
+ "value": "2"
+ },
+ "timeout": {
+ "type": "java.time.Duration",
+ "value": "PT1M"
+ }
+ }
+ },
+ "eventPlatformQueryClient": {
+ "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl",
+ "fields": {
+ "httpClient": {
+ "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted",
+ "fields": {
+ "$interceptors": {
+ "notCapturedReason": "depth",
+ "type": "io.micronaut.aop.Interceptor[][]"
+ },
+ "$proxyMethods": {
+ "notCapturedReason": "depth",
+ "type": "io.micronaut.inject.ExecutableMethod[]"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "apiMonitorInt": {
+ "type": "int",
+ "value": "86"
+ }
+ }
+ },
+ "return": {
+ "arguments": {
+ "apiMonitorStr": {
+ "type": "java.lang.String",
+ "value": "red"
+ },
+ "this": {
+ "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob",
+ "fields": {
+ "apiClient": {
+ "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient",
+ "fields": {
+ "converter": {
+ "type": "com.datadog.debugger.monitor.utils.JsonApiConverter",
+ "fields": {
+ "mapper": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.ObjectMapper"
+ }
+ }
+ },
+ "rcApiClient": {
+ "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient",
+ "fields": {
+ "apiClient": {
+ "notCapturedReason": "depth",
+ "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted"
+ },
+ "mcnultyJobConfig": {
+ "notCapturedReason": "depth",
+ "type": "java.util.LinkedHashMap"
+ },
+ "secretManager": {
+ "notCapturedReason": "depth",
+ "type": "com.datadog.debugger.monitor.utils.VaultSecretManager"
+ }
+ }
+ }
+ }
+ },
+ "metrics": {
+ "type": "com.datadog.debugger.Metrics",
+ "fields": {
+ "statsd": {
+ "type": "com.timgroup.statsd.NonBlockingStatsDClient",
+ "fields": {
+ "clientChannel": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.UnixDatagramClientChannel"
+ },
+ "handler": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.NonBlockingStatsDClient$1"
+ },
+ "constantTagsRendered": {
+ "type": "java.lang.String",
+ "value": "|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0"
+ },
+ "statsDSender": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDSender"
+ },
+ "telemetryClientChannel": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.UnixDatagramClientChannel"
+ },
+ "prefix": {
+ "type": "java.lang.String",
+ "value": "dd.debugger_backend_api_monitor."
+ },
+ "telemetryStatsDProcessor": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDNonBlockingProcessor"
+ },
+ "blocking": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "statsDProcessor": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDNonBlockingProcessor"
+ },
+ "telemetryStatsDSender": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.StatsDSender"
+ },
+ "telemetry": {
+ "notCapturedReason": "depth",
+ "type": "com.timgroup.statsd.Telemetry"
+ }
+ }
+ }
+ }
+ },
+ "jobConfiguration": {
+ "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration",
+ "fields": {
+ "pollInterval": {
+ "type": "java.time.Duration",
+ "value": "PT1S"
+ },
+ "orgId": {
+ "type": "long",
+ "value": "2"
+ },
+ "timeout": {
+ "type": "java.time.Duration",
+ "value": "PT1M"
+ }
+ }
+ },
+ "eventPlatformQueryClient": {
+ "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl",
+ "fields": {
+ "httpClient": {
+ "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted",
+ "fields": {
+ "$interceptors": {
+ "notCapturedReason": "depth",
+ "type": "io.micronaut.aop.Interceptor[][]"
+ },
+ "$proxyMethods": {
+ "notCapturedReason": "depth",
+ "type": "io.micronaut.inject.ExecutableMethod[]"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "apiMonitorInt": {
+ "type": "int",
+ "value": "86"
+ }
+ },
+ "locals": {
+ "uuid": {
+ "type": "java.lang.String",
+ "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb"
+ },
+ "@return": {
+ "type": "java.lang.String",
+ "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb"
+ }
+ }
+ }
+ },
+ "language": "java",
+ "id": "6e34e113-2bb3-44be-9330-79de17fab0fc",
+ "probe": {
+ "evaluateAt": "DEFAULT",
+ "location": {
+ "method": "emitSnapshot",
+ "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob"
+ },
+ "id": "59e78a5b-fa9a-4056-a2bf-a4384769d1ae",
+ "version": 1
+ },
+ "timestamp": 1676045474719
+ }
+ }
+}
\ No newline at end of file
diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json
new file mode 100644
index 0000000000000..2cbade7901259
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json
@@ -0,0 +1,1660 @@
+{
+ "service": "security-monitoring-entity-reducer",
+ "message": "Emitting entity with score",
+ "duration": 0,
+ "ddsource": "dd_debugger",
+ "ddtags": "env:prod,service:security-monitoring-entity-reducer",
+ "debugger": {
+ "snapshot": {
+ "stack": [
+ {
+ "fileName": "EntityStatsOutput.java",
+ "function": "com.dd.logs.security_analytics.EntityStatsOutput.toByteString",
+ "lineNumber": 205
+ },
+ {
+ "fileName": "InternalIntakeReducerOutputEncoder.java",
+ "function": "com.dd.logs.rule_engine.outputs.internal_intake.InternalIntakeReducerOutputEncoder.encode",
+ "lineNumber": 31
+ },
+ {
+ "fileName": "InternalIntakeProducer.java",
+ "function": "com.dd.logs.internal_intake.producer.InternalIntakeProducer.processDatum",
+ "lineNumber": 116
+ },
+ {
+ "fileName": "WorkloadProcessor.java",
+ "function": "com.fsmatic.workload.WorkloadProcessor.onReceive",
+ "lineNumber": 332
+ },
+ {
+ "fileName": "AbstractActor.scala",
+ "function": "akka.actor.UntypedAbstractActor$$anonfun$receive$1.applyOrElse",
+ "lineNumber": 339
+ },
+ {
+ "fileName": "Actor.scala",
+ "function": "akka.actor.Actor.aroundReceive",
+ "lineNumber": 539
+ },
+ {
+ "fileName": "ActorSupport.java",
+ "function": "com.fsmatic.akka.ActorSupport.lambda$aroundReceive$0",
+ "lineNumber": 30
+ },
+ {
+ "function": "com.fsmatic.akka.ActorSupport$$Lambda/0x000000100176eaa8.accept",
+ "lineNumber": -1
+ },
+ {
+ "fileName": "MdcContextActor.java",
+ "function": "com.fsmatic.mdc.MdcContextActor.wrapReceive",
+ "lineNumber": 37
+ },
+ {
+ "fileName": "ActorSupport.java",
+ "function": "com.fsmatic.akka.ActorSupport.aroundReceive",
+ "lineNumber": 30
+ },
+ {
+ "fileName": "AActor.java",
+ "function": "com.fsmatic.akka.AActor.aroundReceive",
+ "lineNumber": 34
+ },
+ {
+ "fileName": "ActorCell.scala",
+ "function": "akka.actor.ActorCell.receiveMessage",
+ "lineNumber": 614
+ },
+ {
+ "fileName": "ActorCell.scala",
+ "function": "akka.actor.ActorCell.invoke",
+ "lineNumber": 583
+ },
+ {
+ "fileName": "Mailbox.scala",
+ "function": "akka.dispatch.Mailbox.processMailbox",
+ "lineNumber": 268
+ },
+ {
+ "fileName": "Mailbox.scala",
+ "function": "akka.dispatch.Mailbox.run",
+ "lineNumber": 229
+ },
+ {
+ "fileName": "Mailbox.scala",
+ "function": "akka.dispatch.Mailbox.exec",
+ "lineNumber": 241
+ },
+ {
+ "fileName": "ForkJoinTask.java",
+ "function": "akka.dispatch.forkjoin.ForkJoinTask.doExec",
+ "lineNumber": 260
+ },
+ {
+ "fileName": "ForkJoinPool.java",
+ "function": "akka.dispatch.forkjoin.ForkJoinPool$WorkQueue.runTask",
+ "lineNumber": 1339
+ },
+ {
+ "fileName": "ForkJoinPool.java",
+ "function": "akka.dispatch.forkjoin.ForkJoinPool.runWorker",
+ "lineNumber": 1979
+ },
+ {
+ "fileName": "ForkJoinWorkerThread.java",
+ "function": "akka.dispatch.forkjoin.ForkJoinWorkerThread.run",
+ "lineNumber": 107
+ }
+ ],
+ "captures": {
+ "lines": {
+ "205": {
+ "arguments": {
+ "this": {
+ "type": "com.dd.logs.security_analytics.ImmutableEntityStatsOutput",
+ "fields": {
+ "projectionAttributes": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.RegularImmutableMap",
+ "type": "com.google.common.collect.RegularImmutableMap"
+ },
+ "signalScore": {
+ "type": "long",
+ "value": "1"
+ },
+ "logger": {
+ "type": "com.dd.logging.BasicLogger",
+ "fields": {
+ "metas": {
+ "isNull": true,
+ "type": "java.util.Map"
+ },
+ "logger": {
+ "type": "ch.qos.logback.classic.Logger",
+ "fields": {
+ "parent": {
+ "notCapturedReason": "depth",
+ "type": "ch.qos.logback.classic.Logger"
+ },
+ "level": {
+ "isNull": true,
+ "type": "ch.qos.logback.classic.Level"
+ },
+ "name": {
+ "type": "java.lang.String",
+ "value": "com.dd.logs.security_analytics.EntityStatsOutput"
+ },
+ "aai": {
+ "isNull": true,
+ "type": "ch.qos.logback.core.spi.AppenderAttachableImpl"
+ },
+ "childrenList": {
+ "isNull": true,
+ "type": "java.util.List"
+ },
+ "loggerContext": {
+ "notCapturedReason": "depth",
+ "type": "ch.qos.logback.classic.LoggerContext"
+ },
+ "effectiveLevelInt": {
+ "type": "int",
+ "value": "20000"
+ },
+ "additive": {
+ "type": "boolean",
+ "value": "true"
+ }
+ }
+ },
+ "name": {
+ "type": "java.lang.String",
+ "value": "com.dd.logs.security_analytics.EntityStatsOutput"
+ }
+ }
+ },
+ "count": {
+ "type": "long",
+ "value": "1"
+ },
+ "projectionTags": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.SingletonImmutableBiMap",
+ "type": "com.google.common.collect.SingletonImmutableBiMap"
+ },
+ "internalIntakeTimestamp": {
+ "type": "long",
+ "value": "1709233217857"
+ },
+ "id": {
+ "type": "java.lang.String",
+ "value": "AY318mP5AAB-QSHUZlx-FQAA"
+ },
+ "trackKey": {
+ "type": "com.dd.logs.Track$Key",
+ "fields": {
+ "type": {
+ "type": "com.dd.logs.TrackType",
+ "fields": {
+ "name": {
+ "type": "java.lang.String",
+ "value": "entitystat"
+ }
+ }
+ },
+ "orgId": {
+ "type": "long",
+ "value": "2"
+ }
+ }
+ },
+ "entity": {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityOutput",
+ "fields": {
+ "id_": {
+ "type": "java.lang.String",
+ "value": "10.154.142.130"
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "typeString_": {
+ "type": "java.lang.String",
+ "value": "ip_address"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ }
+ }
+ },
+ "locals": {
+ "ipAttributes": {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues$Builder",
+ "fields": {
+ "unknownFieldsOrBuilder": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "size": "0",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "isClean": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "bitField0_": {
+ "type": "int",
+ "value": "0"
+ },
+ "meAsParent": {
+ "isNull": true,
+ "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl"
+ },
+ "value_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList",
+ "type": "com.google.protobuf.LazyStringArrayList"
+ },
+ "builderParent": {
+ "isNull": true,
+ "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent"
+ }
+ }
+ },
+ "outputBuilder": {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$Builder",
+ "fields": {
+ "entity_": {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityOutput",
+ "fields": {
+ "id_": {
+ "type": "java.lang.String",
+ "value": "10.154.142.130"
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "typeString_": {
+ "type": "java.lang.String",
+ "value": "ip_address"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ "stringProjections_": {
+ "type": "com.google.protobuf.MapField",
+ "fields": {
+ "mode": {
+ "type": "com.google.protobuf.MapField$StorageMode",
+ "value": "MAP"
+ },
+ "mapData": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap",
+ "type": "com.google.protobuf.MapField$MutabilityAwareMap"
+ },
+ "isMutable": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "listData": {
+ "isNull": true,
+ "type": "java.util.List"
+ },
+ "converter": {
+ "type": "com.google.protobuf.MapField$ImmutableMessageConverter",
+ "fields": {
+ "defaultEntry": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.MapEntry"
+ }
+ }
+ }
+ }
+ },
+ "threatIntelIndicatorsMatched_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList",
+ "type": "com.google.protobuf.LazyStringArrayList"
+ },
+ "bitField0_": {
+ "type": "int",
+ "value": "54"
+ },
+ "entityBuilder_": {
+ "isNull": true,
+ "type": "com.google.protobuf.SingleFieldBuilderV3"
+ },
+ "threatIntelResults_": {
+ "size": "9",
+ "elements": [
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "15.158.54.42"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": "residential_proxy"
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": "spur"
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "199.66.15.4"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": "https://spur.us"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": "suspicious"
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "64.252.144.155"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "70.132.18.132"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.185.207"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "15.158.41.133"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.135.146"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "3.172.1.71"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.130.132"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.UnknownFieldSet"
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ ],
+ "type": "java.util.ArrayList"
+ },
+ "builderParent": {
+ "isNull": true,
+ "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent"
+ },
+ "unknownFieldsOrBuilder": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "size": "0",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "geoIpMetadataBuilder_": {
+ "isNull": true,
+ "type": "com.google.protobuf.RepeatedFieldBuilderV3"
+ },
+ "isClean": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "count_": {
+ "type": "long",
+ "value": "1"
+ },
+ "tagProjections_": {
+ "type": "com.google.protobuf.MapField",
+ "fields": {
+ "mode": {
+ "type": "com.google.protobuf.MapField$StorageMode",
+ "value": "MAP"
+ },
+ "mapData": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap",
+ "type": "com.google.protobuf.MapField$MutabilityAwareMap"
+ },
+ "isMutable": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "listData": {
+ "isNull": true,
+ "type": "java.util.List"
+ },
+ "converter": {
+ "type": "com.google.protobuf.MapField$ImmutableMessageConverter",
+ "fields": {
+ "defaultEntry": {
+ "notCapturedReason": "depth",
+ "type": "com.google.protobuf.MapEntry"
+ }
+ }
+ }
+ }
+ },
+ "geoIpMetadata_": {
+ "size": "0",
+ "type": "java.util.ArrayList"
+ },
+ "ip_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "meAsParent": {
+ "isNull": true,
+ "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl"
+ },
+ "threatIntelResultsBuilder_": {
+ "isNull": true,
+ "type": "com.google.protobuf.RepeatedFieldBuilderV3"
+ },
+ "hosts_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList",
+ "type": "com.google.protobuf.LazyStringArrayList"
+ },
+ "signalScore_": {
+ "type": "long",
+ "value": "1"
+ }
+ }
+ },
+ "geoIpMetadata": {
+ "size": "0",
+ "type": "java.util.ArrayList"
+ },
+ "hosts": {
+ "size": "5",
+ "elements": [
+ {
+ "type": "java.lang.String",
+ "value": "i-02d87409e6596f562"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "i-0f42b05f770544642"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "i-0ab705684278ad06b"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "i-0218eea919deb6e1a"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "i-0405eec023d49f192"
+ }
+ ],
+ "type": "java.util.ArrayList"
+ },
+ "tagProjections": {
+ "entries": [
+ [
+ {
+ "type": "java.lang.String",
+ "value": "source"
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues",
+ "fields": {
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "value_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList",
+ "type": "com.google.protobuf.UnmodifiableLazyStringList"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ ]
+ ],
+ "size": "1",
+ "type": "java.util.HashMap"
+ },
+ "stringProjections": {
+ "entries": [
+ [
+ {
+ "type": "java.lang.String",
+ "value": "service"
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues",
+ "fields": {
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "value_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList",
+ "type": "com.google.protobuf.UnmodifiableLazyStringList"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ ],
+ [
+ {
+ "type": "java.lang.String",
+ "value": "custom.usr.id"
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues",
+ "fields": {
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "value_": {
+ "size": "0",
+ "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList",
+ "type": "com.google.protobuf.UnmodifiableLazyStringList"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ ]
+ ],
+ "size": "2",
+ "type": "java.util.HashMap"
+ },
+ "threatIntelResults": {
+ "size": "9",
+ "elements": [
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "15.158.54.42"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": "residential_proxy"
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": "spur"
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "199.66.15.4"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": "https://spur.us"
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": "suspicious"
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "64.252.144.155"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "70.132.18.132"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.185.207"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "15.158.41.133"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.135.146"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "3.172.1.71"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ },
+ {
+ "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult",
+ "fields": {
+ "category_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "sourceName_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedHashCode": {
+ "type": "int",
+ "value": "0"
+ },
+ "indicator_": {
+ "type": "java.lang.String",
+ "value": "130.176.130.132"
+ },
+ "sourceUrl_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "memoizedIsInitialized": {
+ "type": "byte",
+ "value": "1"
+ },
+ "alwaysUseFieldBuilders": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "attribute_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "type_": {
+ "type": "java.lang.String",
+ "value": "IP"
+ },
+ "intention_": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "unknownFields": {
+ "type": "com.google.protobuf.UnknownFieldSet",
+ "fields": {
+ "fields": {
+ "notCapturedReason": "depth",
+ "type": "java.util.TreeMap"
+ }
+ }
+ },
+ "memoizedSize": {
+ "type": "int",
+ "value": "-1"
+ }
+ }
+ }
+ ],
+ "type": "java.util.ArrayList"
+ }
+ }
+ }
+ }
+ },
+ "language": "java",
+ "id": "d7141999-c5bd-4887-b855-66c7a4dbb9a4",
+ "probe": {
+ "location": {
+ "file": "domains/cloud-security-platform/apps/security-monitoring-entity-reducer/src/main/java/com/dd/logs/security_analytics/EntityStatsOutput.java",
+ "method": "toByteString",
+ "lines": [
+ "205"
+ ],
+ "type": "com.dd.logs.security_analytics.EntityStatsOutput"
+ },
+ "id": "13da639f-2b81-475c-9366-5aa227a07302",
+ "version": 1
+ },
+ "timestamp": 1709233217858
+ }
+ },
+ "logger": {
+ "thread_id": 120,
+ "method": "toByteString",
+ "thread_name": "FsmaticDataCluster-fsmatic.workload.default.work-dispatcher-42",
+ "name": "com.dd.logs.security_analytics.EntityStatsOutput",
+ "version": 2
+ }
+}
\ No newline at end of file
diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json
new file mode 100644
index 0000000000000..2f65ebd33f8fb
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json
@@ -0,0 +1,1645 @@
+{
+ "service": "logs-intake-coordinator",
+ "message": "MetricsClient.parseSuccess response={Cannot find symbol: response} returns {status=ok, resType=time_series, series=..., fromDate=1709233490000, toDate=1709233550000}, ...",
+ "ddsource": "dd_debugger",
+ "duration": 2568054,
+ "ddtags": "git.commit.sha:3698e1f3da2142d6399ef311e80c970e8e89eb02,app:logs-intake-coordinator",
+ "debugger": {
+ "snapshot": {
+ "stack": [
+ {
+ "fileName": "MetricsClient.java",
+ "function": "com.dd.logs.metricsclient.MetricsClient.parseSuccess",
+ "lineNumber": 16
+ },
+ {
+ "fileName": "AHttpServiceCall.java",
+ "function": "com.fsmatic.http.AHttpServiceCall.parseResponse",
+ "lineNumber": 203
+ },
+ {
+ "fileName": "AHttpServiceCall.java",
+ "function": "com.fsmatic.http.AHttpServiceCall$HttpCall.lambda$execute$1",
+ "lineNumber": 389
+ },
+ {
+ "function": "com.fsmatic.http.AHttpServiceCall$HttpCall$$Lambda/0x00007f8843a54460.apply",
+ "lineNumber": -1
+ },
+ {
+ "fileName": "CompletableFuture.java",
+ "function": "java.util.concurrent.CompletableFuture$UniApply.tryFire",
+ "lineNumber": 646
+ },
+ {
+ "fileName": "CompletableFuture.java",
+ "function": "java.util.concurrent.CompletableFuture.postComplete",
+ "lineNumber": 510
+ },
+ {
+ "fileName": "CompletableFuture.java",
+ "function": "java.util.concurrent.CompletableFuture.complete",
+ "lineNumber": 2179
+ },
+ {
+ "fileName": "CompletableCallback.java",
+ "function": "com.fsmatic.http.CompletableCallback.onResponse",
+ "lineNumber": 55
+ },
+ {
+ "fileName": "RealCall.java",
+ "function": "okhttp3.RealCall$AsyncCall.execute",
+ "lineNumber": 174
+ },
+ {
+ "fileName": "NamedRunnable.java",
+ "function": "okhttp3.internal.NamedRunnable.run",
+ "lineNumber": 32
+ },
+ {
+ "fileName": "ThreadPoolExecutor.java",
+ "function": "java.util.concurrent.ThreadPoolExecutor.runWorker",
+ "lineNumber": 1144
+ },
+ {
+ "fileName": "ThreadPoolExecutor.java",
+ "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run",
+ "lineNumber": 642
+ },
+ {
+ "fileName": "Thread.java",
+ "function": "java.lang.Thread.runWith",
+ "lineNumber": 1596
+ },
+ {
+ "fileName": "Thread.java",
+ "function": "java.lang.Thread.run",
+ "lineNumber": 1583
+ }
+ ],
+ "captures": {
+ "return": {
+ "arguments": {
+ "p0": {
+ "type": "okhttp3.Response",
+ "fields": {
+ "request": {
+ "type": "okhttp3.Request",
+ "fields": {
+ "headers": {
+ "type": "okhttp3.Headers",
+ "fields": {
+ "namesAndValues": {
+ "notCapturedReason": "depth",
+ "type": "java.lang.String[]"
+ }
+ }
+ },
+ "method": {
+ "type": "java.lang.String",
+ "value": "GET"
+ },
+ "body": {
+ "isNull": true,
+ "type": "okhttp3.RequestBody"
+ },
+ "url": {
+ "type": "okhttp3.HttpUrl",
+ "fields": {
+ "password": {
+ "notCapturedReason": "redactedIdent",
+ "type": "java.lang.String"
+ },
+ "fragment": {
+ "isNull": true,
+ "type": "java.lang.String"
+ },
+ "scheme": {
+ "type": "java.lang.String",
+ "value": "https"
+ },
+ "$$DD$source": {
+ "isNull": true,
+ "type": "datadog.trace.api.iast.Taintable$Source"
+ },
+ "port": {
+ "type": "int",
+ "value": "443"
+ },
+ "queryNamesAndValues": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "host": {
+ "type": "java.lang.String",
+ "value": "api.datad0g.com"
+ },
+ "pathSegments": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "url": {
+ "size": "663",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "https://api.datad0g.com/api/v1/query?query=timeshift%28%28max%3Akubernetes_state.statefulset.replicas_desired%7Bdatacenter%3Aus1.staging.dog%2Cdds%3Aevent-platform%2Cservice%3Alogs-intake-backend%7D%20by%20%7Bkube_stateful_set%7D%20-%20max%3Akubernetes_st"
+ },
+ "username": {
+ "type": "java.lang.String",
+ "value": ""
+ }
+ }
+ },
+ "cacheControl": {
+ "isNull": true,
+ "type": "okhttp3.CacheControl"
+ },
+ "tags": {
+ "size": "0",
+ "type": "java.util.Collections$EmptyMap"
+ }
+ }
+ },
+ "handshake": {
+ "type": "okhttp3.Handshake",
+ "fields": {
+ "localCertificates": {
+ "size": "0",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "peerCertificates": {
+ "size": "2",
+ "elements": [
+ {
+ "notCapturedReason": "depth",
+ "type": "sun.security.x509.X509CertImpl"
+ },
+ {
+ "notCapturedReason": "depth",
+ "type": "sun.security.x509.X509CertImpl"
+ }
+ ],
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "tlsVersion": {
+ "type": "okhttp3.TlsVersion",
+ "value": "TLS_1_2"
+ },
+ "cipherSuite": {
+ "type": "okhttp3.CipherSuite",
+ "fields": {
+ "javaName": {
+ "type": "java.lang.String",
+ "value": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
+ }
+ }
+ }
+ }
+ },
+ "headers": {
+ "type": "okhttp3.Headers",
+ "fields": {
+ "namesAndValues": {
+ "size": "26",
+ "elements": [
+ {
+ "type": "java.lang.String",
+ "value": "date"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "Thu, 29 Feb 2024 19:05:52 GMT"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "content-type"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "application/json"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-frame-options"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "SAMEORIGIN"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "content-security-policy"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pub293163a918901030b79492fe1ab424cf&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatad0g.com"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "vary"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "Accept-Encoding"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-ratelimit-limit"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "100"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-ratelimit-period"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "10"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-ratelimit-remaining"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "96"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-ratelimit-reset"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "10"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-ratelimit-name"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "batch_query"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "x-content-type-options"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "nosniff"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "strict-transport-security"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "max-age=31536000; includeSubDomains; preload"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "transfer-encoding"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "chunked"
+ }
+ ],
+ "type": "java.lang.String[]"
+ }
+ }
+ },
+ "code": {
+ "type": "int",
+ "value": "200"
+ },
+ "sentRequestAtMillis": {
+ "type": "long",
+ "value": "1709233550915"
+ },
+ "networkResponse": {
+ "type": "okhttp3.Response",
+ "fields": {
+ "request": {
+ "type": "okhttp3.Request",
+ "fields": {
+ "headers": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Headers"
+ },
+ "method": {
+ "type": "java.lang.String",
+ "value": "GET"
+ },
+ "body": {
+ "isNull": true,
+ "type": "okhttp3.RequestBody"
+ },
+ "url": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.HttpUrl"
+ },
+ "cacheControl": {
+ "isNull": true,
+ "type": "okhttp3.CacheControl"
+ },
+ "tags": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyMap"
+ }
+ }
+ },
+ "handshake": {
+ "type": "okhttp3.Handshake",
+ "fields": {
+ "localCertificates": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "peerCertificates": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "tlsVersion": {
+ "type": "okhttp3.TlsVersion",
+ "value": "TLS_1_2"
+ },
+ "cipherSuite": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.CipherSuite"
+ }
+ }
+ },
+ "headers": {
+ "type": "okhttp3.Headers",
+ "fields": {
+ "namesAndValues": {
+ "notCapturedReason": "depth",
+ "type": "java.lang.String[]"
+ }
+ }
+ },
+ "code": {
+ "type": "int",
+ "value": "200"
+ },
+ "sentRequestAtMillis": {
+ "type": "long",
+ "value": "1709233550915"
+ },
+ "networkResponse": {
+ "isNull": true,
+ "type": "okhttp3.Response"
+ },
+ "message": {
+ "type": "java.lang.String",
+ "value": "OK"
+ },
+ "body": {
+ "isNull": true,
+ "type": "okhttp3.ResponseBody"
+ },
+ "cacheControl": {
+ "isNull": true,
+ "type": "okhttp3.CacheControl"
+ },
+ "cacheResponse": {
+ "isNull": true,
+ "type": "okhttp3.Response"
+ },
+ "protocol": {
+ "type": "okhttp3.Protocol",
+ "value": "HTTP_1_1"
+ },
+ "priorResponse": {
+ "isNull": true,
+ "type": "okhttp3.Response"
+ },
+ "receivedResponseAtMillis": {
+ "type": "long",
+ "value": "1709233552199"
+ },
+ "exchange": {
+ "type": "okhttp3.internal.connection.Exchange",
+ "fields": {
+ "call": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.RealCall"
+ },
+ "codec": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.http1.Http1ExchangeCodec"
+ },
+ "eventListener": {
+ "notCapturedReason": "depth",
+ "type": "com.fsmatic.http.OkHttpEventLogger"
+ },
+ "duplex": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "transmitter": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.Transmitter"
+ },
+ "finder": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.ExchangeFinder"
+ }
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "java.lang.String",
+ "value": "OK"
+ },
+ "body": {
+ "type": "okhttp3.internal.http.RealResponseBody",
+ "fields": {
+ "reader": {
+ "type": "okhttp3.ResponseBody$BomAwareReader",
+ "fields": {
+ "delegate": {
+ "notCapturedReason": "depth",
+ "type": "java.io.InputStreamReader"
+ },
+ "charset": {
+ "notCapturedReason": "depth",
+ "type": "sun.nio.cs.UTF_8"
+ },
+ "skipBuffer": {
+ "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field private char[] java.io.Reader.skipBuffer accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521",
+ "type": "char[]"
+ },
+ "closed": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "lock": {
+ "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field protected java.lang.Object java.io.Reader.lock accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521",
+ "type": "java.lang.Object"
+ },
+ "source": {
+ "notCapturedReason": "depth",
+ "type": "okio.RealBufferedSource"
+ }
+ }
+ },
+ "contentTypeString": {
+ "type": "java.lang.String",
+ "value": "application/json"
+ },
+ "contentLength": {
+ "type": "long",
+ "value": "-1"
+ },
+ "source": {
+ "type": "okio.RealBufferedSource",
+ "fields": {
+ "closed": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "buffer": {
+ "notCapturedReason": "depth",
+ "type": "okio.Buffer"
+ },
+ "source": {
+ "notCapturedReason": "depth",
+ "type": "com.fsmatic.http.OkHttpMonitoringInterceptor$LengthTrackingSource"
+ }
+ }
+ }
+ }
+ },
+ "cacheControl": {
+ "isNull": true,
+ "type": "okhttp3.CacheControl"
+ },
+ "cacheResponse": {
+ "isNull": true,
+ "type": "okhttp3.Response"
+ },
+ "protocol": {
+ "type": "okhttp3.Protocol",
+ "value": "HTTP_1_1"
+ },
+ "priorResponse": {
+ "isNull": true,
+ "type": "okhttp3.Response"
+ },
+ "receivedResponseAtMillis": {
+ "type": "long",
+ "value": "1709233552199"
+ },
+ "exchange": {
+ "type": "okhttp3.internal.connection.Exchange",
+ "fields": {
+ "call": {
+ "type": "okhttp3.RealCall",
+ "fields": {
+ "originalRequest": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Request"
+ },
+ "forWebSocket": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "client": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.OkHttpClient"
+ },
+ "executed": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "transmitter": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.Transmitter"
+ }
+ }
+ },
+ "codec": {
+ "type": "okhttp3.internal.http1.Http1ExchangeCodec",
+ "fields": {
+ "sink": {
+ "notCapturedReason": "depth",
+ "type": "okio.RealBufferedSink"
+ },
+ "client": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.OkHttpClient"
+ },
+ "realConnection": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.RealConnection"
+ },
+ "headerLimit": {
+ "type": "long",
+ "value": "261503"
+ },
+ "source": {
+ "notCapturedReason": "depth",
+ "type": "okio.RealBufferedSource"
+ },
+ "state": {
+ "type": "int",
+ "value": "6"
+ },
+ "trailers": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Headers"
+ }
+ }
+ },
+ "eventListener": {
+ "type": "com.fsmatic.http.OkHttpEventLogger",
+ "fields": {
+ "loggers": {
+ "notCapturedReason": "depth",
+ "type": "java.util.concurrent.ConcurrentHashMap"
+ },
+ "hostRegexps": {
+ "notCapturedReason": "depth",
+ "type": "java.util.ArrayList"
+ }
+ }
+ },
+ "duplex": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "transmitter": {
+ "type": "okhttp3.internal.connection.Transmitter",
+ "fields": {
+ "request": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Request"
+ },
+ "noMoreExchanges": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "connectionPool": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.RealConnectionPool"
+ },
+ "callStackTrace": {
+ "isNull": true,
+ "type": "java.lang.Object"
+ },
+ "timeoutEarlyExit": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "exchangeResponseDone": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "timeout": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.Transmitter$1"
+ },
+ "call": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.RealCall"
+ },
+ "canceled": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "exchangeRequestDone": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "exchangeFinder": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.ExchangeFinder"
+ },
+ "eventListener": {
+ "notCapturedReason": "depth",
+ "type": "com.fsmatic.http.OkHttpEventLogger"
+ },
+ "client": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.OkHttpClient"
+ },
+ "connection": {
+ "isNull": true,
+ "type": "okhttp3.internal.connection.RealConnection"
+ },
+ "exchange": {
+ "isNull": true,
+ "type": "okhttp3.internal.connection.Exchange"
+ }
+ }
+ },
+ "finder": {
+ "type": "okhttp3.internal.connection.ExchangeFinder",
+ "fields": {
+ "call": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.RealCall"
+ },
+ "address": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Address"
+ },
+ "connectingConnection": {
+ "isNull": true,
+ "type": "okhttp3.internal.connection.RealConnection"
+ },
+ "routeSelector": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.RouteSelector"
+ },
+ "hasStreamFailure": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "eventListener": {
+ "notCapturedReason": "depth",
+ "type": "com.fsmatic.http.OkHttpEventLogger"
+ },
+ "connectionPool": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.RealConnectionPool"
+ },
+ "routeSelection": {
+ "isNull": true,
+ "type": "okhttp3.internal.connection.RouteSelector$Selection"
+ },
+ "nextRouteToTry": {
+ "isNull": true,
+ "type": "okhttp3.Route"
+ },
+ "transmitter": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.connection.Transmitter"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "p1": {
+ "type": "com.dd.logs.metricsclient.MetricsClientActions$QueryMetric",
+ "fields": {
+ "query": {
+ "size": "492",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor"
+ },
+ "notCritical": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "from": {
+ "type": "java.time.Instant",
+ "value": "2024-02-29T19:04:50.915679003Z"
+ },
+ "to": {
+ "type": "java.time.Instant",
+ "value": "2024-02-29T19:05:50.915679003Z"
+ }
+ }
+ },
+ "this": {
+ "type": "com.dd.logs.metricsclient.MetricsClient",
+ "fields": {
+ "shouldTrace": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "apiKey": {
+ "notCapturedReason": "redactedIdent",
+ "type": "java.lang.String"
+ },
+ "throttledLogger": {
+ "type": "com.dd.logging.ThrottledLogger",
+ "fields": {
+ "logger": {
+ "type": "com.dd.logging.BasicLogger",
+ "fields": {
+ "metas": {
+ "isNull": true,
+ "type": "java.util.Map"
+ },
+ "logger": {
+ "notCapturedReason": "depth",
+ "type": "ch.qos.logback.classic.Logger"
+ },
+ "name": {
+ "type": "java.lang.String",
+ "value": "com.fsmatic.http.AHttpServiceCall"
+ }
+ }
+ },
+ "throttler": {
+ "type": "com.dd.logging.throttler.ByPassThrottler",
+ "fields": {
+ "current": {
+ "notCapturedReason": "depth",
+ "type": "java.util.concurrent.atomic.AtomicReference"
+ },
+ "rate": {
+ "notCapturedReason": "depth",
+ "type": "com.dd.logging.throttler.ByPassThrottler$Rate"
+ },
+ "clock": {
+ "notCapturedReason": "depth",
+ "type": "java.time.Clock$SystemClock"
+ }
+ }
+ }
+ }
+ },
+ "executor": {
+ "type": "com.fsmatic.rpc.RPCCallExecutor",
+ "fields": {
+ "policy": {
+ "type": "com.fsmatic.rpc.RPCCallExecutor$Policy$NoOp"
+ }
+ }
+ },
+ "applicationKey": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "wrapper": {
+ "type": "com.fsmatic.http.HttpWrapper",
+ "fields": {
+ "shouldTrace": {
+ "type": "boolean",
+ "value": "true"
+ },
+ "clientInstrumentation": {
+ "type": "com.fsmatic.http.OkHttpClientInstrumentation",
+ "fields": {
+ "dispatcherInstrumentation": {
+ "notCapturedReason": "depth",
+ "type": "com.fsmatic.http.OkHttpDispatcherInstrumentation"
+ }
+ }
+ },
+ "client": {
+ "notCapturedReason": "fieldCount",
+ "type": "okhttp3.OkHttpClient",
+ "fields": {
+ "cache": {
+ "isNull": true,
+ "type": "okhttp3.Cache"
+ },
+ "socketFactory": {
+ "notCapturedReason": "depth",
+ "type": "javax.net.DefaultSocketFactory"
+ },
+ "internalCache": {
+ "isNull": true,
+ "type": "okhttp3.internal.cache.InternalCache"
+ },
+ "hostnameVerifier": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.tls.OkHostnameVerifier"
+ },
+ "dns": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Dns$$Lambda/0x00007f8843706ac8"
+ },
+ "connectionPool": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.ConnectionPool"
+ },
+ "certificateChainCleaner": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.internal.tls.BasicCertificateChainCleaner"
+ },
+ "certificatePinner": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.CertificatePinner"
+ },
+ "cookieJar": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.CookieJar$1"
+ },
+ "connectionSpecs": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "networkInterceptors": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "proxySelector": {
+ "notCapturedReason": "depth",
+ "type": "sun.net.spi.DefaultProxySelector"
+ },
+ "proxy": {
+ "isNull": true,
+ "type": "java.net.Proxy"
+ },
+ "sslSocketFactory": {
+ "notCapturedReason": "depth",
+ "type": "sun.security.ssl.SSLSocketFactoryImpl"
+ },
+ "eventListenerFactory": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.EventListener$$Lambda/0x00007f88437051a8"
+ },
+ "proxyAuthenticator": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8"
+ },
+ "protocols": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "dispatcher": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Dispatcher"
+ },
+ "authenticator": {
+ "notCapturedReason": "depth",
+ "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8"
+ },
+ "interceptors": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ }
+ }
+ },
+ "mapper": {
+ "type": "com.fasterxml.jackson.databind.ObjectMapper",
+ "fields": {
+ "_serializerFactory": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.ser.BeanSerializerFactory"
+ },
+ "_deserializationContext": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.deser.DefaultDeserializationContext$Impl"
+ },
+ "_deserializationConfig": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.DeserializationConfig"
+ },
+ "_injectableValues": {
+ "isNull": true,
+ "type": "com.fasterxml.jackson.databind.InjectableValues"
+ },
+ "_registeredModuleTypes": {
+ "notCapturedReason": "depth",
+ "type": "java.util.LinkedHashSet"
+ },
+ "_jsonFactory": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.MappingJsonFactory"
+ },
+ "_coercionConfigs": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.cfg.CoercionConfigs"
+ },
+ "_subtypeResolver": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.jsontype.impl.StdSubtypeResolver"
+ },
+ "_configOverrides": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.cfg.ConfigOverrides"
+ },
+ "_serializerProvider": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.ser.DefaultSerializerProvider$Impl"
+ },
+ "_serializationConfig": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.SerializationConfig"
+ },
+ "_mixIns": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.introspect.SimpleMixInResolver"
+ },
+ "_typeFactory": {
+ "notCapturedReason": "depth",
+ "type": "com.fasterxml.jackson.databind.type.TypeFactory"
+ },
+ "_rootDeserializers": {
+ "notCapturedReason": "depth",
+ "type": "java.util.concurrent.ConcurrentHashMap"
+ }
+ }
+ },
+ "metrics": {
+ "type": "com.dd.metrics.WeakRefMetricsCache",
+ "fields": {
+ "cache": {
+ "notCapturedReason": "depth",
+ "type": "com.dd.metrics.WeakRefDoubleCache"
+ },
+ "rootRegistry": {
+ "notCapturedReason": "depth",
+ "type": "com.dd.metrics.RootMetricRegistry"
+ }
+ }
+ },
+ "internalHttpPort": {
+ "type": "int",
+ "value": "9091"
+ }
+ }
+ },
+ "metrics": {
+ "type": "com.dd.metrics.WeakRefMetricsCache",
+ "fields": {
+ "cache": {
+ "type": "com.dd.metrics.WeakRefDoubleCache",
+ "fields": {
+ "layer1": {
+ "notCapturedReason": "depth",
+ "type": "java.util.concurrent.ConcurrentHashMap"
+ }
+ }
+ },
+ "rootRegistry": {
+ "type": "com.dd.metrics.RootMetricRegistry",
+ "fields": {
+ "metrics": {
+ "notCapturedReason": "depth",
+ "type": "java.util.concurrent.ConcurrentHashMap"
+ }
+ }
+ }
+ }
+ },
+ "uri": {
+ "type": "okhttp3.HttpUrl",
+ "fields": {
+ "password": {
+ "notCapturedReason": "redactedIdent",
+ "type": "java.lang.String"
+ },
+ "fragment": {
+ "isNull": true,
+ "type": "java.lang.String"
+ },
+ "scheme": {
+ "type": "java.lang.String",
+ "value": "https"
+ },
+ "$$DD$source": {
+ "isNull": true,
+ "type": "datadog.trace.api.iast.Taintable$Source"
+ },
+ "port": {
+ "type": "int",
+ "value": "443"
+ },
+ "queryNamesAndValues": {
+ "isNull": true,
+ "type": "java.util.List"
+ },
+ "host": {
+ "type": "java.lang.String",
+ "value": "api.datad0g.com"
+ },
+ "pathSegments": {
+ "size": "3",
+ "elements": [
+ {
+ "type": "java.lang.String",
+ "value": "api"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "v1"
+ },
+ {
+ "type": "java.lang.String",
+ "value": "query"
+ }
+ ],
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "url": {
+ "type": "java.lang.String",
+ "value": "https://api.datad0g.com/api/v1/query"
+ },
+ "username": {
+ "type": "java.lang.String",
+ "value": ""
+ }
+ }
+ },
+ "timeout": {
+ "type": "long",
+ "value": "60000"
+ },
+ "tags": {
+ "type": "com.dd.metrics.Tags",
+ "fields": {
+ "hashIsZero": {
+ "type": "boolean",
+ "value": "false"
+ },
+ "hash": {
+ "type": "int",
+ "value": "0"
+ },
+ "tags": {
+ "size": "1",
+ "elements": [
+ {
+ "type": "java.lang.String",
+ "value": "action_name:metricsclient"
+ }
+ ],
+ "type": "java.util.ArrayList"
+ }
+ }
+ }
+ }
+ }
+ },
+ "locals": {
+ "@return": {
+ "type": "com.dd.logs.metricsclient.ImmutableQueryResponse",
+ "fields": {
+ "fromDate": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "resType": {
+ "type": "java.lang.String",
+ "value": "time_series"
+ },
+ "series": {
+ "size": "9",
+ "elements": [
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "577",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-internal-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_s"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-internal-all,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "556",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-spans,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-spans,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "550",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-tcp,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-tcp,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "562",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-testing,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-testing,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "559",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-upload,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:log"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-upload,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "550",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "574",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-datadog,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_se"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-datadog,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "562",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-rum,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-rum,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ },
+ {
+ "type": "com.dd.logs.metricsclient.ImmutableSeries",
+ "fields": {
+ "unit": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$EmptyList"
+ },
+ "expression": {
+ "size": "553",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-logs,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-"
+ },
+ "metric": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "pointlist": {
+ "notCapturedReason": "depth",
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "displayName": {
+ "type": "java.lang.String",
+ "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)"
+ },
+ "scope": {
+ "type": "java.lang.String",
+ "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-logs,service:logs-intake-backend"
+ },
+ "start": {
+ "type": "long",
+ "value": "1709233490000"
+ },
+ "length": {
+ "type": "long",
+ "value": "16"
+ },
+ "end": {
+ "type": "long",
+ "value": "1709233542000"
+ },
+ "interval": {
+ "type": "long",
+ "value": "1"
+ },
+ "aggr": {
+ "type": "java.lang.String",
+ "value": "max"
+ }
+ }
+ }
+ ],
+ "type": "java.util.Collections$UnmodifiableRandomAccessList"
+ },
+ "toDate": {
+ "type": "long",
+ "value": "1709233550000"
+ },
+ "query": {
+ "size": "492",
+ "truncated": true,
+ "type": "java.lang.String",
+ "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor"
+ },
+ "groupBy": {
+ "size": "1",
+ "elements": [
+ {
+ "type": "java.lang.String",
+ "value": "kube_stateful_set"
+ }
+ ],
+ "type": "java.util.Collections$SingletonList"
+ },
+ "message": {
+ "type": "java.lang.String",
+ "value": ""
+ },
+ "status": {
+ "type": "java.lang.String",
+ "value": "ok"
+ }
+ }
+ }
+ }
+ }
+ },
+ "language": "java",
+ "id": "97775bd9-ca14-4192-8c15-21a177819305",
+ "evaluationErrors": [
+ {
+ "expr": "response",
+ "message": "Cannot find symbol: response"
+ }
+ ],
+ "probe": {
+ "location": {
+ "method": "parseSuccess",
+ "type": "com.dd.logs.metricsclient.MetricsClient"
+ },
+ "id": "23a08460-521f-4364-aff5-081221aba86d",
+ "version": 3
+ },
+ "timestamp": 1709233552203
+ }
+ },
+ "logger": {
+ "thread_id": 18170,
+ "method": "parseSuccess",
+ "thread_name": "OkHttp https://api.datad0g.com/...",
+ "name": "com.dd.logs.metricsclient.MetricsClient",
+ "version": 2
+ }
+}
\ No newline at end of file
diff --git a/pkg/dynamicinstrumentation/ebpf/ebpf.go b/pkg/dynamicinstrumentation/ebpf/ebpf.go
new file mode 100644
index 0000000000000..177dcc2146f17
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ebpf/ebpf.go
@@ -0,0 +1,174 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package ebpf provides utilities for setting up and instrumenting the bpf
+// code used by dynamic instrumentation
+package ebpf
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "text/template"
+ "time"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/link"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf"
+ "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// SetupEventsMap creates the ringbuffer which all programs will use for sending output
+func SetupEventsMap() error {
+ var err error
+ events, err := ebpf.NewMap(&ebpf.MapSpec{
+ Name: "events",
+ Type: ebpf.RingBuf,
+ MaxEntries: 1 << 24,
+ })
+ if err != nil {
+ return fmt.Errorf("could not create bpf map for sharing events with userspace: %w", err)
+ }
+ ditypes.EventsRingbuffer = events
+ return nil
+}
+
+// AttachBPFUprobe attaches the probe to the specified process
+func AttachBPFUprobe(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error {
+ executable, err := link.OpenExecutable(procInfo.BinaryPath)
+ if err != nil {
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error())
+ return fmt.Errorf("could not open proc executable for attaching bpf probe: %w", err)
+ }
+
+ spec, err := ebpf.LoadCollectionSpecFromReader(probe.InstrumentationInfo.BPFObjectFileReader)
+ if err != nil {
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error())
+ return fmt.Errorf("could not create bpf collection for probe %s: %w", probe.ID, err)
+ }
+
+ mapReplacements := map[string]*ebpf.Map{}
+	if probe.ID != ditypes.ConfigBPFProbeID {
+		// Regular probes all share the same "events" ringbuffer for sending
+		// output to userspace. The config probe is special: its map has the
+		// same "events" name, but it is given a dedicated ringbuffer (created
+		// below) so probe configurations stay separate from regular events.
+		mapReplacements["events"] = ditypes.EventsRingbuffer
+	} else {
+ configEvents, err := ebpf.NewMap(&ebpf.MapSpec{
+ Type: ebpf.RingBuf,
+ MaxEntries: 1 << 24,
+ })
+ if err != nil {
+ return fmt.Errorf("could not create bpf map for receiving probe configurations: %w", err)
+ }
+ mapReplacements["events"] = configEvents
+ }
+
+ // Load the ebpf object
+ opts := ebpf.CollectionOptions{
+ MapReplacements: mapReplacements,
+ }
+
+ bpfObject, err := ebpf.NewCollectionWithOptions(spec, opts)
+ if err != nil {
+ var ve *ebpf.VerifierError
+ if errors.As(err, &ve) {
+ log.Infof("Verifier error: %+v\n", ve)
+ }
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error())
+ return fmt.Errorf("could not load bpf collection for probe %s: %w", probe.ID, err)
+ }
+
+ procInfo.InstrumentationObjects[probe.ID] = bpfObject
+
+	// Populate map used for zeroing out regions of memory
+ zeroValMap, ok := bpfObject.Maps["zeroval"]
+ if !ok {
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not find bpf map for zero value")
+ return fmt.Errorf("could not find bpf map for zero value in bpf object")
+ }
+
+ var zeroSlice = make([]uint8, probe.InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize)
+ var index uint32
+ err = zeroValMap.Update(index, zeroSlice, 0)
+ if err != nil {
+		diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not update bpf map for zero value")
+		return fmt.Errorf("could not update bpf map for zero value in bpf object: %w", err)
+ }
+
+ // Attach BPF probe to function in executable
+ bpfProgram, ok := bpfObject.Programs[probe.GetBPFFuncName()]
+ if !ok {
+ diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", fmt.Sprintf("couldn't find bpf program for symbol %s", probe.FuncName))
+ return fmt.Errorf("could not find bpf program for symbol %s", probe.FuncName)
+ }
+
+	// Named uprobeLink so it doesn't shadow the imported link package.
+	uprobeLink, err := executable.Uprobe(probe.FuncName, bpfProgram, &link.UprobeOptions{
+		PID: int(procInfo.PID),
+	})
+	if err != nil {
+		diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "UPROBE_FAILURE", err.Error())
+		return fmt.Errorf("could not attach bpf program via uprobe: %w", err)
+	}
+
+	procInfo.SetUprobeLink(probe.ID, &uprobeLink)
+ diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, ditypes.StatusInstalled)
+
+ return nil
+}
+
+// CompileBPFProgram compiles the code for a single probe associated with the process given by procInfo
+func CompileBPFProgram(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error {
+ f := func(in io.Reader, out io.Writer) error {
+ fileContents, err := io.ReadAll(in)
+ if err != nil {
+ return err
+ }
+ programTemplate, err := template.New("program_template").Parse(string(fileContents))
+ if err != nil {
+ return err
+ }
+ err = programTemplate.Execute(out, probe)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ cfg := ddebpf.NewConfig()
+ opts := runtime.CompileOptions{
+ AdditionalFlags: getCFlags(cfg),
+ ModifyCallback: f,
+ UseKernelHeaders: true,
+ }
+ compiledOutput, err := runtime.Dynamicinstrumentation.CompileWithOptions(cfg, opts)
+ if err != nil {
+ return err
+ }
+ probe.InstrumentationInfo.BPFObjectFileReader = compiledOutput
+ return nil
+}
+
+func getCFlags(config *ddebpf.Config) []string {
+ cflags := []string{
+ "-g",
+ "-Wno-unused-variable",
+ }
+ if config.BPFDebug {
+ cflags = append(cflags, "-DDEBUG=1")
+ }
+ return cflags
+}
+
+const (
+ compilationStepTimeout = 60 * time.Second
+)
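
For orientation, here is a minimal sketch of how these functions compose at a call site. The instrument wrapper is hypothetical (the real wiring lives elsewhere in the module), and the ditypes.ProcessInfo/ditypes.Probe values are assumed to be populated already:

package main

import (
	"log"

	diebpf "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf"
	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

// instrument compiles and attaches a single probe to a traced process.
func instrument(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error {
	// Render and compile the BPF program template for this probe...
	if err := diebpf.CompileBPFProgram(procInfo, probe); err != nil {
		return err
	}
	// ...then load the object and attach the uprobe to the target function.
	return diebpf.AttachBPFUprobe(procInfo, probe)
}

func main() {
	// The shared events ringbuffer must exist before any probe is attached.
	if err := diebpf.SetupEventsMap(); err != nil {
		log.Fatal(err)
	}
	// instrument(...) would then be called once per (process, probe) pair.
}
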
diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser.go b/pkg/dynamicinstrumentation/eventparser/event_parser.go
new file mode 100644
index 0000000000000..952a1bb435cfd
--- /dev/null
+++ b/pkg/dynamicinstrumentation/eventparser/event_parser.go
@@ -0,0 +1,268 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package eventparser is used for parsing raw bytes from bpf code into events
+package eventparser
+
+import (
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
+)
+
+// MaxBufferSize is the maximum size of the output buffer from bpf which is read by this package
+const MaxBufferSize = 10000
+
+var (
+ byteOrder = binary.LittleEndian
+)
+
+// ParseEvent takes the raw buffer from bpf and parses it into an event. Events
+// that exceed the probe's configured rate limit are dropped, returning nil.
+func ParseEvent(record []byte, ratelimiters *ratelimiter.MultiProbeRateLimiter) *ditypes.DIEvent {
+ event := ditypes.DIEvent{}
+
+ if len(record) < ditypes.SizeofBaseEvent {
+ log.Tracef("malformed event record (length %d)", len(record))
+ return nil
+ }
+ baseEvent := *(*ditypes.BaseEvent)(unsafe.Pointer(&record[0]))
+ event.ProbeID = unix.ByteSliceToString(baseEvent.Probe_id[:])
+
+ allowed, _, _ := ratelimiters.AllowOneEvent(event.ProbeID)
+ if !allowed {
+ // log.Infof("event dropped by rate limit. Probe %s\t(%d dropped events out of %d)\n",
+ // event.ProbeID, droppedEvents, droppedEvents+successfulEvents)
+ return nil
+ }
+
+ event.PID = baseEvent.Pid
+ event.UID = baseEvent.Uid
+ event.StackPCs = baseEvent.Program_counters[:]
+ event.Argdata = readParams(record[ditypes.SizeofBaseEvent:])
+ return &event
+}
+
+// ParseParams extracts just the parsed parameters from the full event record
+func ParseParams(record []byte) ([]*ditypes.Param, error) {
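+	// The fixed-size base event header occupies the first 392 bytes of the
+	// record (expected to correspond to ditypes.SizeofBaseEvent); the encoded
+	// parameters follow it.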
+ if len(record) < 392 {
+ return nil, fmt.Errorf("malformed event record (length %d)", len(record))
+ }
+ return readParams(record[392:]), nil
+}
+
+func readParams(values []byte) []*ditypes.Param {
+ outputParams := []*ditypes.Param{}
+ for i := 0; i < MaxBufferSize; {
+ if i+3 >= len(values) {
+ break
+ }
+ paramTypeDefinition := parseTypeDefinition(values[i:])
+ if paramTypeDefinition == nil {
+ break
+ }
+
+ sizeOfTypeDefinition := countBufferUsedByTypeDefinition(paramTypeDefinition)
+ i += sizeOfTypeDefinition
+		val, numBytesRead := parseParamValue(paramTypeDefinition, values[i:])
+		if val == nil {
+			// The value buffer was exhausted before the definition was satisfied.
+			break
+		}
+		if reflect.Kind(val.Kind) == reflect.Slice {
+ // In BPF we read the slice by reading the maximum size of a slice
+ // that we allow, instead of just the size of the slice (which we
+ // know at runtime). This is to satisfy the verifier. When parsing
+ // here, we read just the actual slice content, but have to move the
+ // buffer index ahead by the amount of space used by the max read.
+ i += ditypes.SliceMaxSize
+ } else {
+ i += numBytesRead
+ }
+ outputParams = append(outputParams, val)
+ }
+ return outputParams
+}
+
+// parseParamValue takes the representation of the param type's definition and the
+// actual values in the buffer and populates the definition with the value parsed
+// from the byte buffer. It returns the resulting parameter and an indication of
+// how many bytes were read from the buffer
+func parseParamValue(definition *ditypes.Param, buffer []byte) (*ditypes.Param, int) {
+ // Start by creating a stack with each layer of the definition
+ // which will correspond with the layers of the values read from buffer.
+ // This is done using a temporary stack.
+ tempStack := newParamStack()
+ definitionStack := newParamStack()
+ tempStack.push(definition)
+ for !tempStack.isEmpty() {
+ current := tempStack.pop()
+ definitionStack.push(copyParam(current))
+ for i := 0; i < len(current.Fields); i++ {
+ tempStack.push(current.Fields[i])
+ }
+ }
+ var i int
+ valueStack := newParamStack()
+ for i = 0; i+3 < len(buffer); {
+ paramDefinition := definitionStack.pop()
+ if paramDefinition == nil {
+ break
+ }
+ if !isTypeWithHeader(paramDefinition.Kind) {
+ // This is a regular value (no sub-fields).
+ // We parse the value of it from the buffer and push it to the value stack
+ paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)])
+ i += int(paramDefinition.Size)
+ valueStack.push(paramDefinition)
+ } else if reflect.Kind(paramDefinition.Kind) == reflect.Pointer {
+ // Pointers are unique in that they have their own value, and sub-fields.
+ // We parse the value of it from the buffer, place it in the value for
+ // the pointer itself, then pop the next value and place it as a sub-field.
+ paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)])
+ i += int(paramDefinition.Size)
+ paramDefinition.Fields = append(paramDefinition.Fields, valueStack.pop())
+ valueStack.push(paramDefinition)
+ } else {
+ // This is a type with sub-fields which have already been parsed and push
+ // onto the value stack. We pop those and set them as fields in this type.
+ // We then push this type onto the value stack as it may also be a sub-field.
+ // In header types like this, paramDefinition.Size corresponds with the number of
+ // fields under it.
+ for n := 0; n < int(paramDefinition.Size); n++ {
+ paramDefinition.Fields = append([]*ditypes.Param{valueStack.pop()}, paramDefinition.Fields...)
+ }
+ valueStack.push(paramDefinition)
+ }
+ }
+ return valueStack.pop(), i
+}
+
+func copyParam(p *ditypes.Param) *ditypes.Param {
+ return &ditypes.Param{
+ Type: p.Type,
+ Kind: p.Kind,
+ Size: p.Size,
+ }
+}
+
+func parseKindToString(kind byte) string {
+ if kind == 255 {
+ return "Unsupported"
+ } else if kind == 254 {
+ return "reached field limit"
+ }
+
+ return reflect.Kind(kind).String()
+}
+
+// parseTypeDefinition is given a buffer which contains the header type definition
+// for basic/complex types, and the actual content of those types.
+// It returns a fully populated tree of `ditypes.Param` which will be used for parsing
+// the actual values
+func parseTypeDefinition(b []byte) *ditypes.Param {
+ stack := newParamStack()
+ i := 0
+ for {
+		if i+3 > len(b) {
+			return nil
+		}
+ newParam := &ditypes.Param{
+ Kind: b[i],
+ Size: binary.LittleEndian.Uint16(b[i+1 : i+3]),
+ Type: parseKindToString(b[i]),
+ }
+ if newParam.Kind == 0 && newParam.Size == 0 {
+ break
+ }
+ i += 3
+ if isTypeWithHeader(newParam.Kind) {
+ stack.push(newParam)
+ continue
+ }
+
+ stackCheck:
+ if stack.isEmpty() {
+ return newParam
+ }
+ top := stack.peek()
+ top.Fields = append(top.Fields, newParam)
+ if len(top.Fields) == int(top.Size) ||
+ (reflect.Kind(top.Kind) == reflect.Pointer && len(top.Fields) == 1) {
+ newParam = stack.pop()
+ goto stackCheck
+ }
+ }
+ return nil
+}
+
+// countBufferUsedByTypeDefinition determines the number of bytes that were
+// used to encode the type definition. Each individual element of the
+// definition uses 3 bytes (1 for kind, 2 for size). This calculation is needed
+// so we know where to start reading the actual values in the buffer.
+func countBufferUsedByTypeDefinition(root *ditypes.Param) int {
+ queue := []*ditypes.Param{root}
+ counter := 0
+ for len(queue) != 0 {
+ front := queue[0]
+ queue = queue[1:]
+ counter += 3
+ queue = append(queue, front.Fields...)
+ }
+ return counter
+}
+
+func isTypeWithHeader(pieceType byte) bool {
+ return reflect.Kind(pieceType) == reflect.Struct ||
+ reflect.Kind(pieceType) == reflect.Slice ||
+ reflect.Kind(pieceType) == reflect.Array ||
+ reflect.Kind(pieceType) == reflect.Pointer
+}
+
+func parseIndividualValue(paramType byte, paramValueBytes []byte) string {
+ switch reflect.Kind(paramType) {
+ case reflect.Uint8:
+ return fmt.Sprintf("%d", uint8(paramValueBytes[0]))
+ case reflect.Int8:
+ return fmt.Sprintf("%d", int8(paramValueBytes[0]))
+ case reflect.Uint16:
+ return fmt.Sprintf("%d", byteOrder.Uint16(paramValueBytes))
+ case reflect.Int16:
+ return fmt.Sprintf("%d", int16(byteOrder.Uint16(paramValueBytes)))
+ case reflect.Uint32:
+ return fmt.Sprintf("%d", byteOrder.Uint32(paramValueBytes))
+ case reflect.Int32:
+ return fmt.Sprintf("%d", int32(byteOrder.Uint32(paramValueBytes)))
+ case reflect.Uint64:
+ return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes))
+ case reflect.Int64:
+ return fmt.Sprintf("%d", int64(byteOrder.Uint64(paramValueBytes)))
+ case reflect.Uint:
+ return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes))
+ case reflect.Int:
+ return fmt.Sprintf("%d", int(byteOrder.Uint64(paramValueBytes)))
+ case reflect.Pointer:
+ return fmt.Sprintf("0x%X", byteOrder.Uint64(paramValueBytes))
+ case reflect.String:
+ return string(paramValueBytes)
+ case reflect.Bool:
+ if paramValueBytes[0] == 1 {
+ return "true"
+ } else {
+ return "false"
+ }
+ case ditypes.KindUnsupported:
+ return "UNSUPPORTED"
+ default:
+ return ""
+ }
+}
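
To make the wire format concrete, here is a small self-contained sketch of the 3-byte header encoding that parseTypeDefinition consumes: a kind byte mirroring reflect.Kind, followed by a little-endian uint16 size (byte size for scalars, field/element count for composite kinds):

package main

import (
	"encoding/binary"
	"fmt"
	"reflect"
)

func main() {
	// One type-definition element: kind byte, then little-endian uint16 size.
	header := make([]byte, 3)
	header[0] = byte(reflect.Uint16)             // kind (0x9)
	binary.LittleEndian.PutUint16(header[1:], 2) // size in bytes for a scalar
	fmt.Printf("kind=%s size=%d\n",
		reflect.Kind(header[0]), binary.LittleEndian.Uint16(header[1:3]))
	// A full buffer repeats such headers (nested definitions included) and
	// then carries the raw values, as the tests below illustrate.
}
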
diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser_test.go b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go
new file mode 100644
index 0000000000000..94496b5cd2d0f
--- /dev/null
+++ b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go
@@ -0,0 +1,298 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package eventparser
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+func TestCountBufferUsedByTypeDefinition(t *testing.T) {
+ tests := []struct {
+ name string
+ param *ditypes.Param
+ expected int
+ }{
+ {
+ name: "Struct with nested structs and ints",
+ param: &ditypes.Param{
+ Kind: byte(reflect.Struct),
+ Size: 2,
+ Fields: []*ditypes.Param{
+ {Kind: byte(reflect.Struct), Size: 2, Fields: []*ditypes.Param{
+ {Kind: byte(reflect.Int), Size: 8},
+ {Kind: byte(reflect.Int), Size: 8},
+ }},
+ {Kind: byte(reflect.Int), Size: 8},
+ },
+ },
+ expected: 15,
+ },
+ {
+ name: "Complex nested structure",
+ param: &ditypes.Param{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ }},
+ }},
+ },
+ },
+ expected: 18,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := countBufferUsedByTypeDefinition(tt.param)
+ if result != tt.expected {
+ t.Errorf("Expected %d, got %d", tt.expected, result)
+ }
+ })
+ }
+}
+
+func TestParseParamValue(t *testing.T) {
+ tests := []struct {
+ name string
+ inputBuffer []byte
+ inputDefinition *ditypes.Param
+ expectedValue *ditypes.Param
+ }{
+ {
+ name: "Basic slice of structs",
+ inputBuffer: []byte{
+ 1, 2, 0, 3, 0, 0, 0, // Content of slice element 1
+ 4, 5, 0, 6, 0, 0, 0, // Content of slice element 2
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Extra padding
+ },
+ inputDefinition: &ditypes.Param{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint16", Size: 0x2, Kind: 0x9},
+ {Type: "uint32", Size: 0x4, Kind: 0xa},
+ }},
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint16", Size: 0x2, Kind: 0x9},
+ {Type: "uint32", Size: 0x4, Kind: 0xa},
+ }},
+ },
+ },
+ expectedValue: &ditypes.Param{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8},
+ {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9},
+ {ValueStr: "3", Type: "uint32", Size: 0x4, Kind: 0xa},
+ }},
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8},
+ {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9},
+ {ValueStr: "6", Type: "uint32", Size: 0x4, Kind: 0xa},
+ }},
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ val, _ := parseParamValue(tt.inputDefinition, tt.inputBuffer)
+ if !reflect.DeepEqual(val, tt.expectedValue) {
+ t.Errorf("Parsed incorrectly! Got %+v, expected %+v", val, tt.expectedValue)
+ }
+ })
+ }
+}
+
+func TestReadParams(t *testing.T) {
+ tests := []struct {
+ name string
+ inputBuffer []byte
+ expectedResult []*ditypes.Param
+ }{
+ {
+ name: "Basic slice of structs",
+ inputBuffer: []byte{
+ 23, 2, 0, // Slice with 2 elements
+ 25, 3, 0, // Slice elements are each a struct with 3 fields
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 9, 2, 0, // Struct field 2 is a uint16 (size 2)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+ 25, 3, 0, // Slice elements are each a struct with 3 fields
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 9, 2, 0, // Struct field 2 is a uint16 (size 2)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+ 1, 2, 0, 3, // Content of slice element 1 (not relevant for this function)
+ 4, 5, 0, 6, // Content of slice element 2 (not relevant for this function)
+ // Padding
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ },
+ expectedResult: []*ditypes.Param{{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8},
+ {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9},
+ {ValueStr: "3", Type: "uint8", Size: 0x1, Kind: 0x8},
+ }},
+ {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{
+ {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8},
+ {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9},
+ {ValueStr: "6", Type: "uint8", Size: 0x1, Kind: 0x8},
+ }},
+ },
+ }},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output := readParams(tt.inputBuffer)
+ if !reflect.DeepEqual(output, tt.expectedResult) {
+ fmt.Printf("Got: %v\n", output)
+ fmt.Printf("Expected: %v\n", tt.expectedResult)
+ t.Errorf("Didn't read correctly!")
+ }
+ })
+ }
+}
+
+func TestParseTypeDefinition(t *testing.T) {
+ tests := []struct {
+ name string
+ inputBuffer []byte
+ expectedResult *ditypes.Param
+ }{
+ {
+ name: "Slice of structs with uint8 and uint16 fields",
+ inputBuffer: []byte{
+ 23, 2, 0, // Slice with 2 elements
+
+ 25, 3, 0, // Slice elements are each a struct with 3 fields
+
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 9, 2, 0, // Struct field 2 is a uint16 (size 2)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+
+ 25, 3, 0, // Slice elements are each a struct with 3 fields
+
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 9, 2, 0, // Struct field 2 is a uint16 (size 2)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+
+ // Padding
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ expectedResult: &ditypes.Param{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {
+ Type: "struct", Size: 0x3, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint16", Size: 0x2, Kind: 0x9},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ },
+ },
+ {
+ Type: "struct", Size: 0x3, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint16", Size: 0x2, Kind: 0x9},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Nested struct fields",
+ inputBuffer: []byte{
+ 23, 2, 0, // Slice with 2 elements
+				25, 4, 0, // Slice elements are each a struct with 4 fields
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 8, 1, 0, // Struct field 2 is a uint8 (size 1)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+ 25, 2, 0, // Struct field 4 is a struct with 2 fields
+ 8, 1, 0, // Nested struct field 1 is a uint8 (size 1)
+ 8, 1, 0, // Nested struct field 2 is a uint8 (size 1)
+				25, 4, 0, // Slice elements are each a struct with 4 fields
+ 8, 1, 0, // Struct field 1 is a uint8 (size 1)
+ 8, 1, 0, // Struct field 2 is a uint8 (size 1)
+ 8, 1, 0, // Struct field 3 is a uint8 (size 1)
+ 25, 2, 0, // Struct field 4 is a struct with 2 fields
+ 8, 1, 0, // Nested struct field 1 is a uint8 (size 1)
+ 8, 1, 0, // Nested struct field 2 is a uint8 (size 1)
+ 1, 2, 3, // Content of slice element 1 (top-level uint8, then 2 second tier uint8s)
+ 4, 5, 6, // Content of slice element 2 (top-level uint8, then 2 second tier uint8s)
+ // Padding
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ expectedResult: &ditypes.Param{
+ Type: "slice", Size: 0x2, Kind: 0x17,
+ Fields: []*ditypes.Param{
+ {
+ Type: "struct", Size: 0x4, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {
+ Type: "struct", Size: 0x2, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ },
+ },
+ },
+ },
+ {
+ Type: "struct", Size: 0x4, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {
+ Type: "struct", Size: 0x2, Kind: 0x19,
+ Fields: []*ditypes.Param{
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ {Type: "uint8", Size: 0x1, Kind: 0x8},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ typeDefinition := parseTypeDefinition(tt.inputBuffer)
+ if !reflect.DeepEqual(typeDefinition, tt.expectedResult) {
+ fmt.Printf("%v\n", typeDefinition)
+ fmt.Printf("%v\n", tt.expectedResult)
+ t.Errorf("Not equal!")
+ }
+ })
+ }
+}
diff --git a/pkg/dynamicinstrumentation/eventparser/param_stack.go b/pkg/dynamicinstrumentation/eventparser/param_stack.go
new file mode 100644
index 0000000000000..b2359951ca25a
--- /dev/null
+++ b/pkg/dynamicinstrumentation/eventparser/param_stack.go
@@ -0,0 +1,45 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package eventparser
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+type paramStack struct {
+ arr []*ditypes.Param
+}
+
+func newParamStack() *paramStack {
+ s := paramStack{arr: []*ditypes.Param{}}
+ return &s
+}
+
+func (s *paramStack) isEmpty() bool {
+ return len(s.arr) == 0
+}
+
+func (s *paramStack) pop() *ditypes.Param {
+ if s.isEmpty() {
+ return nil
+ }
+ top := s.peek()
+ s.arr = s.arr[0 : len(s.arr)-1]
+ return top
+}
+
+func (s *paramStack) peek() *ditypes.Param {
+ if s.isEmpty() {
+ return nil
+ }
+ return s.arr[len(s.arr)-1]
+}
+
+func (s *paramStack) push(p *ditypes.Param) {
+ s.arr = append(s.arr, p)
+}
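
As a quick illustration of the LIFO behavior the parsers above depend on, here is a self-contained sketch with a stand-in param type (paramStack itself is unexported):

package main

import "fmt"

type param struct{ Type string }

type stack struct{ arr []*param }

func (s *stack) isEmpty() bool { return len(s.arr) == 0 }
func (s *stack) push(p *param) { s.arr = append(s.arr, p) }
func (s *stack) pop() *param {
	if s.isEmpty() {
		return nil // popping an empty stack yields nil, as in paramStack
	}
	top := s.arr[len(s.arr)-1]
	s.arr = s.arr[:len(s.arr)-1]
	return top
}

func main() {
	s := &stack{}
	s.push(&param{"uint8"})
	s.push(&param{"struct"})
	fmt.Println(s.pop().Type, s.pop().Type, s.pop() == nil) // struct uint8 true
}
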
diff --git a/pkg/dynamicinstrumentation/config.go b/pkg/dynamicinstrumentation/module/config.go
similarity index 96%
rename from pkg/dynamicinstrumentation/config.go
rename to pkg/dynamicinstrumentation/module/config.go
index 8265cf8d5a3f1..fa8c7530d2242 100644
--- a/pkg/dynamicinstrumentation/config.go
+++ b/pkg/dynamicinstrumentation/module/config.go
@@ -3,7 +3,9 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package dynamicinstrumentation
+//go:build linux_bpf
+
+package module
import (
"github.com/DataDog/datadog-agent/cmd/system-probe/config"
diff --git a/pkg/dynamicinstrumentation/doc.go b/pkg/dynamicinstrumentation/module/doc.go
similarity index 51%
rename from pkg/dynamicinstrumentation/doc.go
rename to pkg/dynamicinstrumentation/module/doc.go
index 026de960a20b0..145cc294d401c 100644
--- a/pkg/dynamicinstrumentation/doc.go
+++ b/pkg/dynamicinstrumentation/module/doc.go
@@ -3,6 +3,9 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package dynamicinstrumentation encapsulates a system-probe module which uses uprobes and bpf
-// to exfiltrate data from running processes
-package dynamicinstrumentation
+//go:build linux_bpf
+
+// Package module encapsulates a system-probe module which uses uprobes and bpf
+// to exfiltrate data from running processes. This is the Go implementation of
+// the dynamic instrumentation product.
+package module
diff --git a/pkg/dynamicinstrumentation/module/module.go b/pkg/dynamicinstrumentation/module/module.go
new file mode 100644
index 0000000000000..c5cbfced2b919
--- /dev/null
+++ b/pkg/dynamicinstrumentation/module/module.go
@@ -0,0 +1,73 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package module
+
+import (
+ "net/http"
+
+ "github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
+ "github.com/DataDog/datadog-agent/cmd/system-probe/utils"
+ coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ di "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation"
+)
+
+// Module is the dynamic instrumentation system probe module
+type Module struct {
+ godi *di.GoDI
+}
+
+// NewModule creates a new dynamic instrumentation system probe module
+func NewModule(config *Config) (*Module, error) {
+ godi, err := di.RunDynamicInstrumentation(&di.DIOptions{
+ Offline: coreconfig.SystemProbe().GetBool("dynamic_instrumentation.offline_mode"),
+ ProbesFilePath: coreconfig.SystemProbe().GetString("dynamic_instrumentation.probes_file_path"),
+ SnapshotOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.snapshot_output_file_path"),
+ DiagnosticOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.diagnostics_output_file_path"),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &Module{godi}, nil
+}
+
+// Close disables the dynamic instrumentation system probe module
+func (m *Module) Close() {
+ if m.godi == nil {
+ log.Info("Could not close dynamic instrumentation module, already closed")
+ return
+ }
+ log.Info("Closing dynamic instrumentation module")
+ m.godi.Close()
+}
+
+// GetStats returns a map of various metrics about the state of the module
+func (m *Module) GetStats() map[string]interface{} {
+ if m == nil || m.godi == nil {
+ log.Info("Could not get stats from dynamic instrumentation module, closed")
+ return map[string]interface{}{}
+ }
+ debug := map[string]interface{}{}
+ stats := m.godi.GetStats()
+ debug["PIDEventsCreated"] = stats.PIDEventsCreatedCount
+ debug["ProbeEventsCreated"] = stats.ProbeEventsCreatedCount
+ return debug
+}
+
+// Register creates a health check endpoint for the dynamic instrumentation module
+func (m *Module) Register(httpMux *module.Router) error {
+ httpMux.HandleFunc("/check", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests,
+ func(w http.ResponseWriter, req *http.Request) {
+ stats := []string{}
+ utils.WriteAsJSON(w, stats)
+ }))
+
+ log.Info("Registering dynamic instrumentation module")
+ return nil
+}
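
For reference, these are the system-probe configuration keys NewModule reads. The key names are taken verbatim from the code above; the one-line descriptions are informed guesses based on the corresponding DIOptions field names:

package main

import "fmt"

func main() {
	keys := [][2]string{
		{"dynamic_instrumentation.offline_mode", "run against local files instead of a live backend (bool)"},
		{"dynamic_instrumentation.probes_file_path", "file providing probe definitions in offline mode"},
		{"dynamic_instrumentation.snapshot_output_file_path", "file receiving snapshot output in offline mode"},
		{"dynamic_instrumentation.diagnostics_output_file_path", "file receiving diagnostics output in offline mode"},
	}
	for _, kv := range keys {
		fmt.Printf("%-52s %s\n", kv[0], kv[1])
	}
}
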
diff --git a/pkg/dynamicinstrumentation/module_linux.go b/pkg/dynamicinstrumentation/module/module_stub.go
similarity index 55%
rename from pkg/dynamicinstrumentation/module_linux.go
rename to pkg/dynamicinstrumentation/module/module_stub.go
index 193e8a90646ac..83956088c9466 100644
--- a/pkg/dynamicinstrumentation/module_linux.go
+++ b/pkg/dynamicinstrumentation/module/module_stub.go
@@ -3,34 +3,44 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package dynamicinstrumentation
+//go:build !linux_bpf
+
+// Package module provides the dynamic instrumentation module. This stub
+// implementation is used when the target platform does not support the
+// features required by dynamic instrumentation.
+package module
import (
"github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
- "github.com/DataDog/datadog-agent/pkg/util/log"
+ sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types"
)
//nolint:revive // TODO(DEBUG) Fix revive linter
-type Module struct{}
+type Config struct{}
//nolint:revive // TODO(DEBUG) Fix revive linter
-func NewModule(config *Config) (*Module, error) {
- return &Module{}, nil
+func NewConfig(_ *sysconfigtypes.Config) (*Config, error) {
+ return &Config{}, nil
}
//nolint:revive // TODO(DEBUG) Fix revive linter
-func (m *Module) Close() {
- log.Info("Closing user tracer module")
+type Module struct {
}
+//nolint:revive // TODO(DEBUG) Fix revive linter
+func NewModule(config *Config) (*Module, error) {
+ return nil, nil
+}
+
+//nolint:revive // TODO(DEBUG) Fix revive linter
+func (m *Module) Close() {}
+
//nolint:revive // TODO(DEBUG) Fix revive linter
func (m *Module) GetStats() map[string]interface{} {
- debug := map[string]interface{}{}
- return debug
+ return nil
}
//nolint:revive // TODO(DEBUG) Fix revive linter
func (m *Module) Register(_ *module.Router) error {
- log.Info("Registering dynamic instrumentation module")
return nil
}
diff --git a/pkg/dynamicinstrumentation/proctracker/proctracker.go b/pkg/dynamicinstrumentation/proctracker/proctracker.go
new file mode 100644
index 0000000000000..f03d86c17efaa
--- /dev/null
+++ b/pkg/dynamicinstrumentation/proctracker/proctracker.go
@@ -0,0 +1,251 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package proctracker provides a facility for Dynamic Instrumentation to discover
+// and track the lifecycle of processes running on the same host
+package proctracker
+
+import (
+ "debug/elf"
+ "errors"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/link"
+
+ "github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
+ "github.com/DataDog/datadog-agent/pkg/network/go/binversion"
+ "github.com/DataDog/datadog-agent/pkg/process/monitor"
+ "github.com/DataDog/datadog-agent/pkg/security/secl/model"
+ "github.com/DataDog/datadog-agent/pkg/security/utils"
+ "github.com/DataDog/datadog-agent/pkg/util/kernel"
+ "golang.org/x/sys/unix"
+)
+
+type processTrackerCallback func(ditypes.DIProcs)
+
+// ProcessTracker is adapted from https://github.com/DataDog/datadog-agent/blob/main/pkg/network/protocols/http/ebpf_gotls.go
+type ProcessTracker struct {
+ procRoot string
+ lock sync.RWMutex
+ pm *monitor.ProcessMonitor
+ processes processes
+ binaries binaries
+ callback processTrackerCallback
+ unsubscribe []func()
+}
+
+// NewProcessTracker creates a new ProcessTracker
+func NewProcessTracker(callback processTrackerCallback) *ProcessTracker {
+ pt := ProcessTracker{
+ pm: monitor.GetProcessMonitor(),
+ procRoot: kernel.ProcFSRoot(),
+ callback: callback,
+ binaries: make(map[binaryID]*runningBinary),
+ processes: make(map[pid]binaryID),
+ }
+ return &pt
+}
+
+// Start subscribes to exec and exit events so dynamic instrumentation can be made
+// aware of new processes that may need to be instrumented or instrumented processes
+// that should no longer be instrumented
+func (pt *ProcessTracker) Start() error {
+ unsubscribeExec := pt.pm.SubscribeExec(pt.handleProcessStart)
+ unsubscribeExit := pt.pm.SubscribeExit(pt.handleProcessStop)
+
+ pt.unsubscribe = append(pt.unsubscribe, unsubscribeExec)
+ pt.unsubscribe = append(pt.unsubscribe, unsubscribeExit)
+
+ err := pt.pm.Initialize(false)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Stop unsubscribes from exec and exit events
+func (pt *ProcessTracker) Stop() {
+ for _, unsubscribe := range pt.unsubscribe {
+ unsubscribe()
+ }
+}
+
+func (pt *ProcessTracker) handleProcessStart(pid uint32) {
+ exePath := filepath.Join(pt.procRoot, strconv.FormatUint(uint64(pid), 10), "exe")
+
+ go pt.inspectBinary(exePath, pid)
+}
+
+func (pt *ProcessTracker) handleProcessStop(pid uint32) {
+ pt.unregisterProcess(pid)
+}
+
+func (pt *ProcessTracker) inspectBinary(exePath string, pid uint32) {
+ serviceName := getServiceName(pid)
+ if serviceName == "" {
+ // if the expected env vars are not set we don't inspect the binary
+ return
+ }
+	log.Infof("Found instrumentation candidate: %s", serviceName)
+ // binPath, err := os.Readlink(exePath)
+ // if err != nil {
+ // // /proc could be slow to update so we retry a few times
+ // end := time.Now().Add(10 * time.Millisecond)
+ // for end.After(time.Now()) {
+ // binPath, err = os.Readlink(exePath)
+ // if err == nil {
+ // break
+ // }
+ // time.Sleep(time.Millisecond)
+ // }
+ // }
+ // if err != nil {
+ // // we can't access the binary path here (pid probably ended already)
+ // // there is not much we can do, and we don't want to flood the logs
+ // log.Infof("cannot follow link %s -> %s, %s", exePath, binPath, err)
+ // // in docker, following the symlink does not work, but we can open the file in /proc
+ // // if we can't follow the symlink we try to open /proc directly
+ // // TODO: validate this approach
+ // binPath = exePath
+ // }
+
+ // TODO: switch to using exePath for the demo, use conditional logic above moving forward
+ binPath := exePath
+ f, err := os.Open(exePath)
+ if err != nil {
+ // this should be a debug log, but we want to know if this happens
+ log.Infof("could not open file %s, %s", binPath, err)
+ return
+ }
+ defer f.Close()
+
+ elfFile, err := elf.NewFile(f)
+ if err != nil {
+ log.Infof("file %s could not be parsed as an ELF file: %s", binPath, err)
+ return
+ }
+
+ noFuncs := make(map[string]bininspect.FunctionConfiguration)
+ noStructs := make(map[bininspect.FieldIdentifier]bininspect.StructLookupFunction)
+ _, err = bininspect.InspectNewProcessBinary(elfFile, noFuncs, noStructs)
+ if errors.Is(err, binversion.ErrNotGoExe) {
+ return
+ }
+ if err != nil {
+ log.Infof("error reading exe: %s", err)
+ return
+ }
+
+ var stat syscall.Stat_t
+ if err = syscall.Stat(binPath, &stat); err != nil {
+ log.Infof("could not stat binary path %s: %s", binPath, err)
+ return
+ }
+ binID := binaryID{
+ Id_major: unix.Major(stat.Dev),
+ Id_minor: unix.Minor(stat.Dev),
+ Ino: stat.Ino,
+ }
+ pt.registerProcess(binID, pid, stat.Mtim, binPath, serviceName)
+}
+
+func (pt *ProcessTracker) registerProcess(binID binaryID, pid pid, mTime syscall.Timespec, binaryPath string, serviceName string) {
+ pt.lock.Lock()
+ defer pt.lock.Unlock()
+
+ pt.processes[pid] = binID
+ if bin, ok := pt.binaries[binID]; ok {
+ // process that uses this binary already exists
+ bin.processCount++
+ } else {
+ pt.binaries[binID] = &runningBinary{
+ binID: binID,
+ mTime: mTime,
+ processCount: 1,
+ binaryPath: binaryPath,
+ serviceName: serviceName,
+ }
+ }
+ state := pt.currentState()
+ pt.callback(state)
+}
+
+func getServiceName(pid uint32) string {
+ envVars, _, err := utils.EnvVars([]string{"DD"}, pid, model.MaxArgsEnvsSize)
+ if err != nil {
+ return ""
+ }
+
+ serviceName := ""
+ diEnabled := false
+ for _, envVar := range envVars {
+ parts := strings.SplitN(envVar, "=", 2)
+ if len(parts) == 2 && parts[0] == "DD_SERVICE" {
+ serviceName = parts[1]
+ }
+ if len(parts) == 2 && parts[0] == "DD_DYNAMIC_INSTRUMENTATION_ENABLED" {
+ diEnabled = parts[1] == "true"
+ }
+ }
+
+ if !diEnabled {
+ return ""
+ }
+ return serviceName
+}
+
+func (pt *ProcessTracker) unregisterProcess(pid pid) {
+ pt.lock.Lock()
+ defer pt.lock.Unlock()
+
+ binID, ok := pt.processes[pid]
+ if !ok {
+ return
+ }
+ delete(pt.processes, pid)
+
+ bin, ok := pt.binaries[binID]
+ if !ok {
+ return
+ }
+ bin.processCount--
+ if bin.processCount == 0 {
+ delete(pt.binaries, binID)
+ state := pt.currentState()
+ pt.callback(state)
+ }
+}
+
+func (pt *ProcessTracker) currentState() map[ditypes.PID]*ditypes.ProcessInfo {
+ state := make(map[ditypes.PID]*ditypes.ProcessInfo)
+
+ for pid, binID := range pt.processes {
+ bin := pt.binaries[binID]
+ state[pid] = &ditypes.ProcessInfo{
+ PID: pid,
+ BinaryPath: bin.binaryPath,
+ ServiceName: bin.serviceName,
+
+ ProbesByID: make(map[ditypes.ProbeID]*ditypes.Probe),
+ InstrumentationUprobes: make(map[ditypes.ProbeID]*link.Link),
+ InstrumentationObjects: make(map[ditypes.ProbeID]*ebpf.Collection),
+ }
+ }
+ return state
+}
diff --git a/pkg/dynamicinstrumentation/proctracker/types.go b/pkg/dynamicinstrumentation/proctracker/types.go
new file mode 100644
index 0000000000000..a377cbef780d8
--- /dev/null
+++ b/pkg/dynamicinstrumentation/proctracker/types.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package proctracker
+
+import (
+ "syscall"
+
+ "github.com/DataDog/datadog-agent/pkg/network/protocols/http/gotls"
+)
+
+type pid = uint32
+
+type binaryID = gotls.TlsBinaryId
+
+type runningBinary struct {
+	// Unique ID of the binary, derived from its device numbers and inode.
+ binID binaryID
+
+ // Modification time of the hooked binary, at the time of hooking.
+ mTime syscall.Timespec
+
+ // Reference counter for the number of currently running processes for
+ // this binary.
+ processCount int32
+
+	// The location of the binary on the filesystem.
+ binaryPath string
+
+ // The value of DD_SERVICE for the given binary.
+ // Associating a service name with a binary is not correct because
+ // we may have the same binary running with different service names
+ // on the same machine. However, for simplicity in the prototype we
+ // assume a 1:1 mapping.
+ serviceName string
+}
+
+type binaries map[binaryID]*runningBinary
+type processes map[pid]binaryID
diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go
new file mode 100644
index 0000000000000..0283c526c5c05
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package ratelimiter implements a simple rate limiter used for tracking and limiting
+// the rate of events being produced per probe
+package ratelimiter
+
+import (
+ "math"
+
+ "golang.org/x/time/rate"
+)
+
+// SingleRateLimiter is a wrapper on top of golang.org/x/time/rate which
+// implements a rate limiter and also tracks counts of dropped and successful events.
+type SingleRateLimiter struct {
+ rate float64
+ limiter *rate.Limiter
+ droppedEvents int64
+ successfulEvents int64
+}
+
+// MultiProbeRateLimiter is used for tracking and limiting the rate of events
+// being produced for multiple probes
+type MultiProbeRateLimiter struct {
+ defaultRate float64
+	limiters    map[string]*SingleRateLimiter
+}
+
+// NewMultiProbeRateLimiter creates a new MultiProbeRateLimiter
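+//
+// A typical (illustrative) use limits each probe to one event per second by
+// default while disabling limiting entirely for a control probe:
+//
+//	rl := NewMultiProbeRateLimiter(1.0)
+//	rl.SetRate(configProbeID, 0) // configProbeID is a hypothetical ID
+//	allowed, dropped, successful := rl.AllowOneEvent(probeID)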
+func NewMultiProbeRateLimiter(defaultRatePerSecond float64) *MultiProbeRateLimiter {
+ return &MultiProbeRateLimiter{
+ defaultRate: defaultRatePerSecond,
+		limiters:    map[string]*SingleRateLimiter{},
+ }
+}
+
+// SetRate sets the rate for events with a specific ID. Specify mps=0 to
+// disable rate limiting.
+func (mr *MultiProbeRateLimiter) SetRate(id string, mps float64) {
+	mr.limiters[id] = NewSingleEventRateLimiter(mps)
+}
+
+// AllowOneEvent reports whether an event with the given ID should be allowed
+// according to the configured rate limit, along with the number of dropped
+// and successful events recorded so far for that ID.
+func (mr *MultiProbeRateLimiter) AllowOneEvent(id string) (bool, int64, int64) {
+	rateLimiter, ok := mr.limiters[id]
+ if !ok {
+ mr.SetRate(id, mr.defaultRate)
+		rateLimiter = mr.limiters[id]
+ }
+ return rateLimiter.AllowOneEvent(),
+ rateLimiter.droppedEvents, rateLimiter.successfulEvents
+}
+
+// NewSingleEventRateLimiter returns a rate limiter which restricts the number
+// of events sampled per second. Passing mps <= 0 disables limiting (all events
+// are allowed). The MaxPerSecond value of the rule may override the default.
+func NewSingleEventRateLimiter(mps float64) *SingleRateLimiter {
+ limit := math.MaxFloat64
+ if mps > 0 {
+ limit = mps
+ }
+ return &SingleRateLimiter{
+ rate: mps,
+ limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))),
+ }
+}
+
+// AllowOneEvent returns the rate limiter's decision to allow an event to be
+// processed. As a side effect it updates the limiter's counters of dropped
+// and successful events.
+func (r *SingleRateLimiter) AllowOneEvent() bool {
+	if r.rate == 0 {
+		return true
+	}
+
+	sampled := false
+ if r.limiter.Allow() {
+ sampled = true
+ r.successfulEvents++
+ } else {
+ r.droppedEvents++
+ }
+
+ return sampled
+}
diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go
new file mode 100644
index 0000000000000..88cc21aa199a0
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go
@@ -0,0 +1,48 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ratelimiter
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRateLimit(t *testing.T) {
+
+ testCases := []struct {
+ name string
+ limitPerSecond float64
+ }{
+ {
+ name: "expected1",
+ limitPerSecond: 1.0,
+ },
+ {
+ name: "expected2",
+ limitPerSecond: 5.0,
+ },
+ }
+
+ for _, testcase := range testCases {
+
+ const timesToRun = 10000
+ t.Run(testcase.name, func(t *testing.T) {
+
+ r := NewSingleEventRateLimiter(testcase.limitPerSecond)
+
+ for i := 0; i < timesToRun; i++ {
+ r.AllowOneEvent()
+ }
+
+			assert.Equal(t, testcase.limitPerSecond, float64(timesToRun)-float64(r.droppedEvents))
+			assert.Equal(t, float64(timesToRun)-testcase.limitPerSecond, float64(r.droppedEvents))
+			assert.Equal(t, testcase.limitPerSecond, float64(r.successfulEvents))
+ })
+ }
+}
diff --git a/pkg/dynamicinstrumentation/ringbufconsumer.go b/pkg/dynamicinstrumentation/ringbufconsumer.go
new file mode 100644
index 0000000000000..ed6c574377ce5
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ringbufconsumer.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package dynamicinstrumentation
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
+ "github.com/cilium/ebpf/ringbuf"
+)
+
+// startRingbufferConsumer opens the pinned bpf ringbuffer map and starts a
+// goroutine that consumes, rate-limits, and parses events from it. It returns
+// a function that stops the consumer.
+func (goDI *GoDI) startRingbufferConsumer() (func(), error) {
+ r, err := ringbuf.NewReader(ditypes.EventsRingbuffer)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't set up reader for ringbuffer: %w", err)
+ }
+
+ var (
+ record ringbuf.Record
+ closed = false
+ )
+
+ closeFunc := func() {
+ closed = true
+ r.Close()
+ }
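+	// Note: closed is written by closeFunc and read by the consumer goroutine
+	// without synchronization. Once the reader is closed, ReadInto starts
+	// returning errors, and the loop exits as soon as it observes the flag.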
+
+ // TODO: ensure rate limiters are removed once probes are removed
+ rateLimiters := ratelimiter.NewMultiProbeRateLimiter(1.0)
+ rateLimiters.SetRate(ditypes.ConfigBPFProbeID, 0)
+
+ go func() {
+ for {
+ if closed {
+ break
+ }
+ err = r.ReadInto(&record)
+ if err != nil {
+ log.Infof("couldn't read event off ringbuffer: %s", err.Error())
+ continue
+ }
+
+ event := eventparser.ParseEvent(record.RawSample, rateLimiters)
+ if event == nil {
+ continue
+ }
+ goDI.stats.PIDEventsCreatedCount[event.PID]++
+ goDI.stats.ProbeEventsCreatedCount[event.ProbeID]++
+ goDI.processEvent(event)
+ }
+ }()
+
+ return closeFunc, nil
+}
diff --git a/pkg/dynamicinstrumentation/uploader/di_log_converter.go b/pkg/dynamicinstrumentation/uploader/di_log_converter.go
new file mode 100644
index 0000000000000..8f6d3063dc7bf
--- /dev/null
+++ b/pkg/dynamicinstrumentation/uploader/di_log_converter.go
@@ -0,0 +1,159 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package uploader
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+
+ "github.com/google/uuid"
+)
+
+// NewDILog creates a new snapshot upload based on the event and relevant process
+func NewDILog(procInfo *ditypes.ProcessInfo, event *ditypes.DIEvent) *ditypes.SnapshotUpload {
+ if procInfo == nil {
+ log.Infof("Process with pid %d not found, ignoring event", event.PID)
+ return nil
+ }
+ probe := procInfo.GetProbe(event.ProbeID)
+ if probe == nil {
+		log.Infof("Probe %s not found, ignoring event", event.ProbeID)
+ return nil
+ }
+
+ snapshotID, _ := uuid.NewUUID()
+ argDefs := getFunctionArguments(procInfo, probe)
+ var captures ditypes.Captures
+ if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters {
+ captures = convertCaptures(argDefs, event.Argdata)
+ } else {
+ captures = reportCaptureError(argDefs)
+ }
+
+ capturesJSON, _ := json.Marshal(captures)
+ stackTrace, err := parseStackTrace(procInfo, event.StackPCs)
+ if err != nil {
+		log.Infof("event from pid/probe %d/%s does not include stack trace: %s", event.PID, event.ProbeID, err)
+ }
+ return &ditypes.SnapshotUpload{
+ Service: probe.ServiceName,
+ Message: fmt.Sprintf("%s %s", probe.FuncName, capturesJSON),
+ DDSource: "dd_debugger",
+ DDTags: "",
+ Debugger: struct {
+ ditypes.Snapshot `json:"snapshot"`
+ }{
+ Snapshot: ditypes.Snapshot{
+ ID: &snapshotID,
+ Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+ Language: "go",
+ ProbeInSnapshot: convertProbe(probe),
+ Captures: captures,
+ Stack: stackTrace,
+ },
+ },
+ Duration: 0,
+ }
+}
+
+func convertProbe(probe *ditypes.Probe) ditypes.ProbeInSnapshot {
+ module, function := parseFuncName(probe.FuncName)
+ return ditypes.ProbeInSnapshot{
+ ID: getProbeUUID(probe.ID),
+ ProbeLocation: ditypes.ProbeLocation{
+ Method: function,
+ Type: module,
+ },
+ }
+}
+
+func convertCaptures(defs []ditypes.Parameter, captures []*ditypes.Param) ditypes.Captures {
+ return ditypes.Captures{
+ Entry: &ditypes.Capture{
+ Arguments: convertArgs(defs, captures),
+ },
+ }
+}
+
+func reportCaptureError(defs []ditypes.Parameter) ditypes.Captures {
+ args := make(map[string]*ditypes.CapturedValue)
+ for _, def := range defs {
+ args[def.Name] = &ditypes.CapturedValue{
+ Type: def.Type,
+ NotCapturedReason: "Failed to instrument, type is unsupported or too complex",
+ }
+ }
+ return ditypes.Captures{
+ Entry: &ditypes.Capture{
+ Arguments: args,
+ },
+ }
+}
+
+func convertArgs(defs []ditypes.Parameter, captures []*ditypes.Param) map[string]*ditypes.CapturedValue {
+ args := make(map[string]*ditypes.CapturedValue)
+ for idx, capture := range captures {
+ var argName string
+ if idx < len(defs) {
+ argName = defs[idx].Name
+ } else {
+ argName = fmt.Sprintf("arg_%d", idx)
+ }
+
+ if capture == nil {
+ continue
+ }
+
+ cv := &ditypes.CapturedValue{Type: capture.Type}
+ if capture.ValueStr != "" || capture.Type == "string" {
+ // we make a copy of the string so the pointer isn't overwritten in the loop
+ valueCopy := capture.ValueStr
+ cv.Value = &valueCopy
+ }
+ if capture.Fields != nil && idx < len(defs) {
+ cv.Fields = convertArgs(defs[idx].ParameterPieces, capture.Fields)
+ }
+ args[argName] = cv
+ }
+ return args
+}
+
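+// parseFuncName splits a symbol of the form "module.function", e.g.
+// "main.handleRequest" yields ("main", "handleRequest"); names that do not
+// split into exactly two parts are returned whole as the function name with
+// an empty module.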
+func parseFuncName(funcName string) (string, string) {
+ parts := strings.Split(funcName, ".")
+ if len(parts) == 2 {
+ return parts[0], parts[1]
+ }
+ return "", funcName
+}
+
+func getFunctionArguments(proc *ditypes.ProcessInfo, probe *ditypes.Probe) []ditypes.Parameter {
+ return proc.TypeMap.Functions[probe.FuncName]
+}
+
+func getProbeUUID(probeID string) string {
+	// the RC config ID has the format datadog/<org_id>/<product>/<type>_<uuid>/<name>
+ // if we fail to parse it, we just return the original probeID string
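+	// e.g. (hypothetical) "datadog/2/LIVE_DEBUGGING/logProbe_1234-abcd/c0ffee"
+	// yields "1234-abcd"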
+ parts := strings.Split(probeID, "/")
+ if len(parts) != 5 {
+ return probeID
+ }
+ idPart := parts[len(parts)-2]
+ parts = strings.Split(idPart, "_")
+ if len(parts) != 2 {
+ return probeID
+ }
+ // we could also validate that the extracted string is a valid UUID,
+ // but it's not necessary since we tolerate IDs that don't parse
+ return parts[1]
+}
diff --git a/pkg/dynamicinstrumentation/uploader/offline.go b/pkg/dynamicinstrumentation/uploader/offline.go
new file mode 100644
index 0000000000000..a1d19375ebf47
--- /dev/null
+++ b/pkg/dynamicinstrumentation/uploader/offline.go
@@ -0,0 +1,83 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package uploader
+
+import (
+	"encoding/json"
+	"errors"
+	"os"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+// OfflineSerializer is used for serializing events and writing them to a file
+// instead of uploading them to the Datadog backend
+type OfflineSerializer[T any] struct {
+ outputFile *os.File
+ mu sync.Mutex
+}
+
+// NewOfflineLogSerializer creates an offline serializer that writes events to
+// a file instead of uploading them to the Datadog backend
+func NewOfflineLogSerializer(outputPath string) (*OfflineSerializer[ditypes.SnapshotUpload], error) {
+	if outputPath == "" {
+		return nil, errors.New("no snapshot output path set")
+	}
+ return NewOfflineSerializer[ditypes.SnapshotUpload](outputPath)
+}
+
+// NewOfflineDiagnosticSerializer creates an offline serializer for serializing diagnostic information
+// and printing instead of uploading to the DataDog backend
+func NewOfflineDiagnosticSerializer(dm *diagnostics.DiagnosticManager, outputPath string) (*OfflineSerializer[ditypes.DiagnosticUpload], error) {
+	if outputPath == "" {
+		return nil, errors.New("no diagnostic output path set")
+	}
+ ds, err := NewOfflineSerializer[ditypes.DiagnosticUpload](outputPath)
+ if err != nil {
+ return nil, err
+ }
+ go func() {
+ for diagnostic := range dm.Updates {
+ ds.Enqueue(diagnostic)
+ }
+ }()
+ return ds, nil
+}
+
+// NewOfflineSerializer is the generic create method for offline serialization
+// of events or diagnostic output
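+// The output is NDJSON: one JSON document per line. Illustrative use:
+//
+//	s, err := NewOfflineSerializer[ditypes.SnapshotUpload]("/tmp/di.ndjson")
+//	if err != nil {
+//		return err
+//	}
+//	s.Enqueue(&ditypes.SnapshotUpload{})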
+func NewOfflineSerializer[T any](outputPath string) (*OfflineSerializer[T], error) {
+ file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
+ if err != nil {
+ return nil, err
+ }
+ u := &OfflineSerializer[T]{
+ outputFile: file,
+ }
+ return u, nil
+}
+
+// Enqueue serializes the item to a single JSON line and appends it to the
+// output file. It returns false if the item could not be written.
+func (s *OfflineSerializer[T]) Enqueue(item *T) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ bs, err := json.Marshal(item)
+ if err != nil {
+ log.Info("Failed to marshal item", item)
+ return false
+ }
+
+	if _, err = s.outputFile.WriteString(string(bs) + "\n"); err != nil {
+		log.Error(err)
+		return false
+	}
+	return true
+}
diff --git a/pkg/dynamicinstrumentation/uploader/stack_trace.go b/pkg/dynamicinstrumentation/uploader/stack_trace.go
new file mode 100644
index 0000000000000..f428e2c40e0d2
--- /dev/null
+++ b/pkg/dynamicinstrumentation/uploader/stack_trace.go
@@ -0,0 +1,151 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package uploader
+
+import (
+ "cmp"
+ "debug/dwarf"
+ "errors"
+ "fmt"
+ "slices"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+// parseStackTrace resolves raw program counters into stack frames, stopping
+// at the first zero PC or after the frame for main.main
+func parseStackTrace(procInfo *ditypes.ProcessInfo, rawProgramCounters []uint64) ([]ditypes.StackFrame, error) {
+ stackTrace := make([]ditypes.StackFrame, 0)
+ if procInfo == nil {
+ return stackTrace, errors.New("nil process info")
+ }
+
+ for i := range rawProgramCounters {
+ if rawProgramCounters[i] == 0 {
+ break
+ }
+
+ entries, ok := procInfo.TypeMap.InlinedFunctions[rawProgramCounters[i]]
+ if ok {
+ for n := range entries {
+ inlinedFuncInfo, err := pcToLine(procInfo, rawProgramCounters[i])
+ if err != nil {
+ return stackTrace, fmt.Errorf("could not resolve pc to inlined function info: %w", err)
+ }
+
+ symName, lineNumber, err := parseInlinedEntry(procInfo.DwarfData.Reader(), entries[n])
+ if err != nil {
+ return stackTrace, fmt.Errorf("could not get inlined entries: %w", err)
+ }
+ stackFrame := ditypes.StackFrame{Function: fmt.Sprintf("%s [inlined in %s]", symName, inlinedFuncInfo.fn), FileName: inlinedFuncInfo.file, Line: int(lineNumber)}
+ stackTrace = append(stackTrace, stackFrame)
+ }
+ }
+
+ funcInfo, err := pcToLine(procInfo, rawProgramCounters[i])
+ if err != nil {
+ return stackTrace, fmt.Errorf("could not resolve pc to function info: %w", err)
+ }
+ stackFrame := ditypes.StackFrame{Function: funcInfo.fn, FileName: funcInfo.file, Line: int(funcInfo.line)}
+ stackTrace = append(stackTrace, stackFrame)
+
+ if funcInfo.fn == "main.main" {
+ break
+ }
+ }
+ return stackTrace, nil
+}
+
+type funcInfo struct {
+ file string
+ line int64
+ fn string
+}
+
+func pcToLine(procInfo *ditypes.ProcessInfo, pc uint64) (*funcInfo, error) {
+
+ var (
+ file string
+ line int64
+ fn string
+ )
+
+ typeMap := procInfo.TypeMap
+
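+	// FunctionsByPC is expected to be sorted by descending low PC; the reversed
+	// comparator makes the binary search land on the first entry whose low PC
+	// is <= pc, i.e. the function that contains pc.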
+ functionIndex, _ := slices.BinarySearchFunc(typeMap.FunctionsByPC, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int {
+ return cmp.Compare(b.LowPC, a.LowPC)
+ })
+
+ var fileNumber int64
+
+ if functionIndex >= len(typeMap.FunctionsByPC) {
+ return nil, fmt.Errorf("invalid function index")
+ }
+ funcEntry := typeMap.FunctionsByPC[functionIndex].Entry
+ for _, field := range funcEntry.Field {
+ if field.Attr == dwarf.AttrName {
+ fn = field.Val.(string)
+ }
+ if field.Attr == dwarf.AttrDeclFile {
+ fileNumber = field.Val.(int64)
+ }
+ if field.Attr == dwarf.AttrDeclLine {
+ line = field.Val.(int64)
+ }
+ }
+
+ compileUnitIndex, _ := slices.BinarySearchFunc(typeMap.DeclaredFiles, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int {
+ return cmp.Compare(b.LowPC, a.LowPC)
+ })
+
+	if compileUnitIndex >= len(typeMap.DeclaredFiles) {
+		return nil, fmt.Errorf("invalid compile unit index")
+	}
+	compileUnitEntry := typeMap.DeclaredFiles[compileUnitIndex].Entry
+
+ cuLineReader, err := procInfo.DwarfData.LineReader(compileUnitEntry)
+ if err != nil {
+ return nil, fmt.Errorf("could not get file line reader for compile unit: %w", err)
+ }
+ files := cuLineReader.Files()
+	if int(fileNumber) >= len(files) {
+ return nil, fmt.Errorf("invalid file number in dwarf function entry associated with compile unit")
+ }
+
+ file = files[fileNumber].Name
+
+ return &funcInfo{
+ file: file,
+ line: line,
+ fn: fn,
+ }, nil
+}
+
+func parseInlinedEntry(reader *dwarf.Reader, e *dwarf.Entry) (name string, line int64, err error) {
+
+ var offset dwarf.Offset
+
+ for i := range e.Field {
+ if e.Field[i].Attr == dwarf.AttrAbstractOrigin {
+ offset = e.Field[i].Val.(dwarf.Offset)
+ reader.Seek(offset)
+ entry, err := reader.Next()
+ if err != nil {
+ return "", -1, fmt.Errorf("could not read inlined function origin: %w", err)
+ }
+ for j := range entry.Field {
+ if entry.Field[j].Attr == dwarf.AttrName {
+ name = entry.Field[j].Val.(string)
+ }
+ }
+ }
+
+ if e.Field[i].Attr == dwarf.AttrCallLine {
+ line = e.Field[i].Val.(int64)
+ }
+ }
+
+ return name, line, nil
+}
diff --git a/pkg/dynamicinstrumentation/uploader/uploader.go b/pkg/dynamicinstrumentation/uploader/uploader.go
new file mode 100644
index 0000000000000..f14fa8233e0a4
--- /dev/null
+++ b/pkg/dynamicinstrumentation/uploader/uploader.go
@@ -0,0 +1,221 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+// Package uploader provides functionality for uploading events and diagnostic
+// information to the Datadog backend
+package uploader
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
+)
+
+// LogUploader is the interface for uploading Dynamic Instrumentation logs
+type LogUploader interface {
+ Enqueue(item *ditypes.SnapshotUpload) bool
+}
+
+// DiagnosticUploader is the interface for uploading Dynamic Instrumentation
+// diagnostic information
+type DiagnosticUploader interface {
+ Enqueue(item *ditypes.DiagnosticUpload) bool
+}
+
+// Uploader is a generic form of uploader functionality
+type Uploader[T any] struct {
+ buffer chan *T
+ client *http.Client
+
+ batchSize int
+ uploadMode UploadMode
+}
+
+// UploadMode reflects the kind of data that is being uploaded
+type UploadMode bool
+
+const (
+ // UploadModeDiagnostic means the data being uploaded is diagnostic information
+ UploadModeDiagnostic UploadMode = true
+	// UploadModeLog means the data being uploaded is logs
+ UploadModeLog UploadMode = false
+)
+
+func startDiagnosticUploader(dm *diagnostics.DiagnosticManager) *Uploader[ditypes.DiagnosticUpload] {
+ u := NewUploader[ditypes.DiagnosticUpload](UploadModeDiagnostic)
+ go func() {
+ for diagnostic := range dm.Updates {
+ u.Enqueue(diagnostic)
+ }
+ }()
+ return u
+}
+
+// NewLogUploader creates a new log uploader
+func NewLogUploader() *Uploader[ditypes.SnapshotUpload] {
+ return NewUploader[ditypes.SnapshotUpload](UploadModeLog)
+}
+
+// NewDiagnosticUploader creates a new diagnostic uploader
+func NewDiagnosticUploader() *Uploader[ditypes.DiagnosticUpload] {
+ return startDiagnosticUploader(diagnostics.Diagnostics)
+}
+
+// NewUploader creates a new uploader of a specified generic type
+func NewUploader[T any](mode UploadMode) *Uploader[T] {
+ u := &Uploader[T]{
+ buffer: make(chan *T, 100),
+ client: &http.Client{},
+
+ batchSize: 100,
+ uploadMode: mode,
+ }
+ go u.processBuffer()
+ return u
+}
+
+// Enqueue enqueues data to be uploaded. It returns false when the upload
+// queue is full and the item was dropped.
+func (u *Uploader[T]) Enqueue(item *T) bool {
+ select {
+ case u.buffer <- item:
+ return true
+ default:
+ log.Infof("Uploader buffer full, dropping message %+v", item)
+ return false
+ }
+}
+
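+// processBuffer drains the queue, uploading a batch as soon as it reaches
+// batchSize and flushing any partial batch at least once per second.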
+func (u *Uploader[T]) processBuffer() {
+ flushTimer := time.NewTicker(1 * time.Second)
+ defer flushTimer.Stop()
+
+	batch := make([]*T, 0, u.batchSize)
+
+ for {
+ select {
+ case item := <-u.buffer:
+ batch = append(batch, item)
+ if len(batch) >= u.batchSize {
+ batchCopy := make([]*T, len(batch))
+ copy(batchCopy, batch)
+ go u.uploadBatch(batchCopy)
+ batch = batch[:0]
+ flushTimer.Reset(1 * time.Second)
+ }
+ case <-flushTimer.C:
+ if len(batch) > 0 {
+ batchCopy := make([]*T, len(batch))
+ copy(batchCopy, batch)
+ go u.uploadBatch(batchCopy)
+ batch = batch[:0]
+ }
+ flushTimer.Reset(1 * time.Second)
+ }
+ }
+}
+
+func (u *Uploader[T]) uploadBatch(batch []*T) {
+ switch u.uploadMode {
+ case UploadModeDiagnostic:
+ u.uploadDiagnosticBatch(batch)
+ case UploadModeLog:
+ u.uploadLogBatch(batch)
+ }
+}
+
+// There's no need to do endpoint discovery: we can hardcode the URLs, since
+// any datadog-agent build that ships Go DI also ships the proxy upload
+// endpoints.
+
+func (u *Uploader[T]) uploadLogBatch(batch []*T) {
+ // TODO: find out if there are more efficient ways of sending logs to the backend
+ // this is the way all other DI runtimes upload data
+ url := fmt.Sprintf("http://%s:8126/debugger/v1/input", getAgentHost())
+	body, err := json.Marshal(batch)
+	if err != nil {
+		log.Info("Failed to marshal log batch", err)
+		return
+	}
+ req, err := http.NewRequest("POST", url, bytes.NewReader(body))
+ if err != nil {
+ log.Info("Failed to build request", err)
+ return
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := u.client.Do(req)
+ if err != nil {
+ log.Info("Error uploading log batch", err)
+ return
+ }
+ defer resp.Body.Close()
+ log.Info("HTTP", resp.StatusCode, url)
+}
+
+func (u *Uploader[T]) uploadDiagnosticBatch(batch []*T) {
+ url := fmt.Sprintf("http://%s:8126/debugger/v1/diagnostics", getAgentHost())
+
+ // Create a buffer to hold the multipart form data
+ var b bytes.Buffer
+ w := multipart.NewWriter(&b)
+
+ diagnosticJSON, err := json.Marshal(batch)
+ if err != nil {
+ log.Info("Failed to marshal diagnostic batch", err, batch)
+ return
+ }
+
+ header := make(textproto.MIMEHeader)
+ header.Set("Content-Disposition", `form-data; name="event"; filename="event.json"`)
+ header.Set("Content-Type", "application/json")
+ fw, err := w.CreatePart(header)
+ if err != nil {
+ log.Info("Failed to create form file", err)
+ return
+ }
+
+ // Write the JSON data to the form-data part
+ if _, err = fw.Write(diagnosticJSON); err != nil {
+ log.Info("Failed to write data to form file", err)
+ return
+ }
+
+ // Close the multipart writer, otherwise the request will be missing the terminating boundary.
+ w.Close()
+
+ // Create a new request
+ req, err := http.NewRequest("POST", url, &b)
+ if err != nil {
+ log.Info("Failed to build request", err)
+ return
+ }
+
+ // Set the content type to multipart/form-data and include the boundary
+ req.Header.Set("Content-Type", w.FormDataContentType())
+ resp, err := u.client.Do(req)
+ if err != nil {
+ log.Info("Error uploading diagnostic batch", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ log.Info("HTTP", resp.StatusCode, url)
+}
+
+func getAgentHost() string {
+ ddAgentHost := os.Getenv("DD_AGENT_HOST")
+ if ddAgentHost == "" {
+ ddAgentHost = "localhost"
+ }
+ return ddAgentHost
+}
diff --git a/pkg/dynamicinstrumentation/util/file_watcher.go b/pkg/dynamicinstrumentation/util/file_watcher.go
new file mode 100644
index 0000000000000..ea05116ea4b24
--- /dev/null
+++ b/pkg/dynamicinstrumentation/util/file_watcher.go
@@ -0,0 +1,61 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package util
+
+import (
+ "os"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// FileWatcher is used to track updates to a particular filepath
+type FileWatcher struct {
+ filePath string
+}
+
+// NewFileWatcher creates a FileWatcher to track updates to a specified file
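+//
+// Illustrative use:
+//
+//	fw := NewFileWatcher("/tmp/probes.json") // hypothetical path
+//	updates, _ := fw.Watch()
+//	for content := range updates {
+//		handleUpdate(content) // hypothetical handler
+//	}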
+func NewFileWatcher(filePath string) *FileWatcher {
+ return &FileWatcher{filePath: filePath}
+}
+
+func (fw *FileWatcher) readFile() ([]byte, error) {
+ content, err := os.ReadFile(fw.filePath)
+ if err != nil {
+ return nil, err
+ }
+ return content, nil
+}
+
+// Watch watches the target file for changes and returns a channel that will receive
+// the file's content whenever it changes.
+// The initial implementation used fsnotify, but this was losing update events when running
+// e2e tests - this simpler implementation behaves as expected, even if it's less efficient.
+// Since this is meant to be used only for testing and development, it's fine to keep this
+// implementation.
+func (fw *FileWatcher) Watch() (<-chan []byte, error) {
+ updateChan := make(chan []byte)
+ prevContent := []byte{}
+ ticker := time.NewTicker(100 * time.Millisecond)
+ go func() {
+		defer ticker.Stop()
+		defer close(updateChan)
+ for range ticker.C {
+ content, err := fw.readFile()
+ if err != nil {
+ log.Infof("Error reading file %s: %s", fw.filePath, err)
+ return
+ }
+ if len(content) > 0 && string(content) != string(prevContent) {
+ prevContent = content
+ updateChan <- content
+ }
+ }
+ }()
+
+ return updateChan, nil
+}
diff --git a/pkg/dynamicinstrumentation/util/file_watcher_test.go b/pkg/dynamicinstrumentation/util/file_watcher_test.go
new file mode 100644
index 0000000000000..894395b9f61c0
--- /dev/null
+++ b/pkg/dynamicinstrumentation/util/file_watcher_test.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package util
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFileWatcherMultipleFiles(t *testing.T) {
+ // create two temporary files
+ f1, _ := os.CreateTemp("", "file-watcher-test-")
+ f2, _ := os.CreateTemp("", "file-watcher-test-")
+ defer f1.Close()
+ defer f2.Close()
+ defer os.Remove(f1.Name())
+ defer os.Remove(f2.Name())
+
+ // get the absolute path for both files
+ fp1, _ := filepath.Abs(f1.Name())
+ fp2, _ := filepath.Abs(f2.Name())
+
+ // initialize file contents
+ os.WriteFile(fp1, []byte("This is file 1"), fs.ModeAppend)
+ os.WriteFile(fp2, []byte("This is file 2"), fs.ModeAppend)
+
+ // initialize file watchers
+ fw1 := NewFileWatcher(fp1)
+ fw2 := NewFileWatcher(fp2)
+
+ ch1, err := fw1.Watch()
+ assert.NoError(t, err)
+ ch2, err := fw2.Watch()
+ assert.NoError(t, err)
+
+ fc1 := <-ch1
+ assert.Equal(t, "This is file 1", string(fc1))
+ fc2 := <-ch2
+ assert.Equal(t, "This is file 2", string(fc2))
+
+ os.WriteFile(fp1, []byte("Updated file 1"), fs.ModeAppend)
+ os.WriteFile(fp2, []byte("Updated file 2"), fs.ModeAppend)
+
+ fc1 = <-ch1
+ assert.Equal(t, "Updated file 1", string(fc1))
+ fc2 = <-ch2
+ assert.Equal(t, "Updated file 2", string(fc2))
+}
+
+func TestFileWatcherDeletedFile(t *testing.T) {
+ timeout := time.After(1 * time.Second)
+ done := make(chan bool)
+ go func() {
+ f, _ := os.CreateTemp("", "file-watcher-delete-test-")
+ defer f.Close()
+ defer os.Remove(f.Name())
+
+ fp, _ := filepath.Abs(f.Name())
+ os.WriteFile(fp, []byte("Initial"), fs.ModeAppend)
+
+ info, err := os.Stat(f.Name())
+ if err != nil {
+ panic(err)
+ }
+ m := info.Mode()
+
+ fw := NewFileWatcher(fp)
+ ch, err := fw.Watch()
+ assert.NoError(t, err)
+
+ fc := <-ch
+ assert.Equal(t, "Initial", string(fc))
+
+ // delete file and check that we are still receiving updates
+ os.Remove(f.Name())
+ os.WriteFile(fp, []byte("Updated"), fs.ModeAppend)
+ err = os.Chmod(fp, m)
+ assert.NoError(t, err)
+
+ info, err = os.Stat(f.Name())
+ if err != nil {
+ panic(err)
+ }
+ m = info.Mode()
+ log.Info(m)
+
+ fc, ok := <-ch
+ assert.True(t, ok, "expected channel to be open")
+ assert.Equal(t, "Updated", string(fc), "expected to receive new file contents on channel")
+ done <- true
+ }()
+
+ select {
+ case <-timeout:
+ t.Fatal("Timeout exceeded")
+ case <-done:
+ }
+}
diff --git a/pkg/ebpf/bytecode/runtime/.gitignore b/pkg/ebpf/bytecode/runtime/.gitignore
index a4383358ec72f..9b4fc67872634 100644
--- a/pkg/ebpf/bytecode/runtime/.gitignore
+++ b/pkg/ebpf/bytecode/runtime/.gitignore
@@ -1 +1,14 @@
*.d
+
+# runtime compilation asset integrity files
+conntrack.go
+dynamicinstrumentation.go
+http.go
+logdebug-test.go
+offsetguess-test.go
+oom-kill.go
+runtime-security.go
+shared-libraries.go
+tcp-queue-length.go
+tracer.go
+usm.go
diff --git a/pkg/ebpf/bytecode/runtime/asset.go b/pkg/ebpf/bytecode/runtime/asset.go
index 2d0812368b5b2..0c0add2da5702 100644
--- a/pkg/ebpf/bytecode/runtime/asset.go
+++ b/pkg/ebpf/bytecode/runtime/asset.go
@@ -8,7 +8,9 @@
package runtime
import (
+ "bytes"
"crypto/sha256"
+ "encoding/hex"
"fmt"
"io"
"os"
@@ -37,34 +39,59 @@ func newAsset(filename, hash string) *asset {
}
}
+// CompileOptions are options used to compile eBPF programs at runtime
+type CompileOptions struct {
+ // AdditionalFlags are extra flags passed to clang
+ AdditionalFlags []string
+ // ModifyCallback is a callback function that is allowed to modify the contents before compilation
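+	// For example (illustrative), a callback can prepend a macro definition
+	// before handing the source to the compiler:
+	//
+	//	opts.ModifyCallback = func(in io.Reader, out io.Writer) error {
+	//		if _, err := io.WriteString(out, "#define DEBUG 1\n"); err != nil {
+	//			return err
+	//		}
+	//		_, err := io.Copy(out, in)
+	//		return err
+	//	}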
+ ModifyCallback func(in io.Reader, out io.Writer) error
+ // StatsdClient is a statsd client to use for telemetry
+ StatsdClient statsd.ClientInterface
+ // UseKernelHeaders enables the inclusion of kernel headers from the host
+ UseKernelHeaders bool
+}
+
// Compile compiles the asset to an object file, writes it to the configured output directory, and
// then opens and returns the compiled output
func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client statsd.ClientInterface) (CompiledOutput, error) {
+ return a.compile(config, CompileOptions{AdditionalFlags: additionalFlags, StatsdClient: client, UseKernelHeaders: true})
+}
+
+// CompileWithOptions is the same as Compile, but takes an options struct with additional choices.
+func (a *asset) CompileWithOptions(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) {
+ return a.compile(config, opts)
+}
+
+func (a *asset) compile(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) {
log.Debugf("starting runtime compilation of %s", a.filename)
start := time.Now()
a.tm.compilationEnabled = true
defer func() {
a.tm.compilationDuration = time.Since(start)
- if client != nil {
- a.tm.SubmitTelemetry(a.filename, client)
+ if opts.StatsdClient != nil {
+ a.tm.SubmitTelemetry(a.filename, opts.StatsdClient)
}
}()
- opts := kernel.HeaderOptions{
- DownloadEnabled: config.EnableKernelHeaderDownload,
- Dirs: config.KernelHeadersDirs,
- DownloadDir: config.KernelHeadersDownloadDir,
- AptConfigDir: config.AptConfigDir,
- YumReposDir: config.YumReposDir,
- ZypperReposDir: config.ZypperReposDir,
- }
- kernelHeaders := kernel.GetKernelHeaders(opts, client)
- if len(kernelHeaders) == 0 {
- a.tm.compilationResult = headerFetchErr
- return nil, fmt.Errorf("unable to find kernel headers")
+ var kernelHeaders []string
+ if opts.UseKernelHeaders {
+ headerOpts := kernel.HeaderOptions{
+ DownloadEnabled: config.EnableKernelHeaderDownload,
+ Dirs: config.KernelHeadersDirs,
+ DownloadDir: config.KernelHeadersDownloadDir,
+ AptConfigDir: config.AptConfigDir,
+ YumReposDir: config.YumReposDir,
+ ZypperReposDir: config.ZypperReposDir,
+ }
+ kernelHeaders = kernel.GetKernelHeaders(headerOpts, opts.StatsdClient)
+ if len(kernelHeaders) == 0 {
+ a.tm.compilationResult = headerFetchErr
+ return nil, fmt.Errorf("unable to find kernel headers")
+ }
}
+ a.tm.compilationResult = verificationError
outputDir := config.RuntimeCompilerOutputDir
p := filepath.Join(config.BPFDir, "runtime", a.filename)
@@ -78,22 +105,61 @@ func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client st
return nil, fmt.Errorf("unable to create compiler output directory %s: %w", outputDir, err)
}
- protectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f)
+ diskProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f)
if err != nil {
return nil, fmt.Errorf("failed to create ram backed file from %s: %w", f.Name(), err)
}
defer func() {
- if err := protectedFile.Close(); err != nil {
- log.Debugf("error closing protected file %s: %s", protectedFile.Name(), err)
+ if err := diskProtectedFile.Close(); err != nil {
+ log.Debugf("error closing protected file %s: %s", diskProtectedFile.Name(), err)
}
}()
+ protectedFile := diskProtectedFile
+ hash := a.hash
- if err = a.verify(protectedFile); err != nil {
- a.tm.compilationResult = verificationError
+ if err = a.verify(diskProtectedFile); err != nil {
return nil, fmt.Errorf("error reading input file: %s", err)
}
- out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, a.hash, additionalFlags, kernelHeaders)
+ a.tm.compilationResult = compilationErr
+ if opts.ModifyCallback != nil {
+ outBuf := &bytes.Buffer{}
+ // seek to the start and read all of protected file contents
+ if _, err := diskProtectedFile.Seek(0, io.SeekStart); err != nil {
+ return nil, fmt.Errorf("seek disk protected file: %w", err)
+ }
+
+ // run modify callback
+ if err := opts.ModifyCallback(diskProtectedFile, outBuf); err != nil {
+ return nil, fmt.Errorf("modify callback: %w", err)
+ }
+ outReader := bytes.NewReader(outBuf.Bytes())
+
+ // update hash
+ hash, err = sha256Reader(outReader)
+ if err != nil {
+ return nil, fmt.Errorf("hash post-modification protected file: %w", err)
+ }
+ if _, err := outReader.Seek(0, io.SeekStart); err != nil {
+ return nil, fmt.Errorf("seek post-modification contents: %w", err)
+ }
+
+ // create new protected file with the post-modification contents
+ postModifyProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, hash), outputDir, outReader)
+ if err != nil {
+ return nil, fmt.Errorf("create post-modification protected file: %w", err)
+ }
+ defer func() {
+ if err := postModifyProtectedFile.Close(); err != nil {
+ log.Debugf("close post-modification protected file %s: %s", postModifyProtectedFile.Name(), err)
+ }
+ }()
+
+ // set compilation to use post-modification contents
+ protectedFile = postModifyProtectedFile
+ }
+
+ out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, hash, opts.AdditionalFlags, kernelHeaders)
a.tm.compilationResult = result
return out, err
@@ -111,17 +177,24 @@ func createProtectedFile(name, runtimeDir string, source io.Reader) (ProtectedFi
// verify reads the asset from the reader and verifies the content hash matches what is expected.
func (a *asset) verify(source ProtectedFile) error {
- h := sha256.New()
- if _, err := io.Copy(h, source.Reader()); err != nil {
- return fmt.Errorf("error hashing file %s: %w", source.Name(), err)
+ sum, err := sha256Reader(source)
+ if err != nil {
+ return fmt.Errorf("hash file %s: %w", source.Name(), err)
}
- if fmt.Sprintf("%x", h.Sum(nil)) != a.hash {
+ if sum != a.hash {
return fmt.Errorf("file content hash does not match expected value")
}
-
return nil
}
+func sha256Reader(r io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, r); err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(h.Sum(nil)), nil
+}
+
// GetTelemetry returns the compilation telemetry for this asset
func (a *asset) GetTelemetry() CompilationTelemetry {
return a.tm
diff --git a/pkg/ebpf/bytecode/runtime/protected_file.go b/pkg/ebpf/bytecode/runtime/protected_file.go
index a59224f09cf19..4e25867cc815b 100644
--- a/pkg/ebpf/bytecode/runtime/protected_file.go
+++ b/pkg/ebpf/bytecode/runtime/protected_file.go
@@ -20,8 +20,7 @@ import (
// ProtectedFile represents a symlink to a sealed ram-backed file
type ProtectedFile interface {
- Close() error
- Reader() io.Reader
+ io.ReadSeekCloser
Name() string
}
@@ -104,6 +103,10 @@ func (m *ramBackedFile) Name() string {
return m.symlink
}
-func (m *ramBackedFile) Reader() io.Reader {
- return m.file
+func (m *ramBackedFile) Seek(offset int64, whence int) (int64, error) {
+ return m.file.Seek(offset, whence)
+}
+
+func (m *ramBackedFile) Read(p []byte) (n int, err error) {
+ return m.file.Read(p)
}
diff --git a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go
index 9394d168d971a..4cd657d96d26f 100644
--- a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go
+++ b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go
@@ -76,7 +76,7 @@ func compileToObjectFile(inFile, outputDir, filename, inHash string, additionalF
}
// RHEL platforms back-ported the __BPF_FUNC_MAPPER macro, so we can always use the dynamic method there
- if kv >= kernel.VersionCode(4, 10, 0) || family == "rhel" {
+ if len(kernelHeaders) > 0 && (kv >= kernel.VersionCode(4, 10, 0) || family == "rhel") {
var helperPath string
helperPath, err = includeHelperAvailability(kernelHeaders)
if err != nil {
diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go
index 65c357c2e6623..512d0542c62d9 100644
--- a/pkg/ebpf/cgo/genpost.go
+++ b/pkg/ebpf/cgo/genpost.go
@@ -13,6 +13,7 @@ import (
"os"
"regexp"
"runtime"
+ "strings"
)
func main() {
@@ -23,11 +24,30 @@ func main() {
b = removeAbsolutePath(b, runtime.GOOS)
+ int8variableNames := []string{
+ "Buf",
+ "Cgroup",
+ "Cgroup_name",
+ "LocalAddr",
+ "LocalAddress",
+ "Probe_id",
+ "RemoteAddr",
+ "RemoteAddress",
+ "Request_fragment",
+ "Topic_name",
+ "Trigger_comm",
+ "Victim_comm",
+ }
+
// Convert []int8 to []byte in multiple generated fields from the kernel, to simplify
// conversion to string; see golang.org/issue/20753
- convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Fcomm|Tcomm)(\s+)\[(\d+)\]u?int8`)
+ convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`)
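+	// e.g. a generated field "Topic_name [80]int8" becomes "Topic_name [80]byte"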
b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte"))
+ // Convert generated pointers to CGo structs to uintptr
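+	// e.g. a generated field "Req *_Ctype_struct_http_request" (hypothetical
+	// name) becomes "Req uintptr"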
+ convertPointerToUint64Regex := regexp.MustCompile(`\*_Ctype_struct_(\w+)`)
+ b = convertPointerToUint64Regex.ReplaceAll(b, []byte("uintptr"))
+
b, err = format.Source(b)
if err != nil {
log.Fatal(err)
diff --git a/pkg/ebpf/compiler/compiler.go b/pkg/ebpf/compiler/compiler.go
index a26c90c57c4e2..1553ef5d42ac4 100644
--- a/pkg/ebpf/compiler/compiler.go
+++ b/pkg/ebpf/compiler/compiler.go
@@ -66,10 +66,6 @@ func kernelHeaderPaths(headerDirs []string) []string {
// CompileToObjectFile compiles an eBPF program
func CompileToObjectFile(inFile, outputFile string, cflags []string, headerDirs []string) error {
- if len(headerDirs) == 0 {
- return fmt.Errorf("unable to find kernel headers")
- }
-
tmpIncludeDir, err := writeStdarg()
if err != nil {
return err
diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go
index 4e3d0941553ee..1aa78c95d4b4e 100644
--- a/pkg/fleet/daemon/daemon_test.go
+++ b/pkg/fleet/daemon/daemon_test.go
@@ -20,7 +20,6 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/pkg/config/remote/client"
"github.com/DataDog/datadog-agent/pkg/fleet/env"
"github.com/DataDog/datadog-agent/pkg/fleet/installer/repository"
pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
@@ -104,13 +103,13 @@ func (m *testPackageManager) UninstrumentAPMInjector(ctx context.Context, method
type testRemoteConfigClient struct {
sync.Mutex
t *testing.T
- listeners map[string][]client.Handler
+ listeners map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))
}
func newTestRemoteConfigClient(t *testing.T) *testRemoteConfigClient {
return &testRemoteConfigClient{
t: t,
- listeners: make(map[string][]client.Handler),
+ listeners: make(map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))),
}
}
@@ -123,7 +122,7 @@ func (c *testRemoteConfigClient) Close() {
func (c *testRemoteConfigClient) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) {
c.Lock()
defer c.Unlock()
- c.listeners[product] = append(c.listeners[product], client.Handler(fn))
+ c.listeners[product] = append(c.listeners[product], fn)
}
func (c *testRemoteConfigClient) SetInstallerState(_ []*pbgo.PackageState) {
diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go
index 9509185899ccd..d68b8489ad62e 100644
--- a/pkg/fleet/daemon/remote_config.go
+++ b/pkg/fleet/daemon/remote_config.go
@@ -100,7 +100,7 @@ func (c *catalog) getPackage(pkg string, version string, arch string, platform s
type handleCatalogUpdate func(catalog catalog) error
-func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) client.Handler {
+func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) {
var catalogOnce sync.Once
return func(catalogConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) {
var mergedCatalog catalog
@@ -192,7 +192,7 @@ type taskWithVersionParams struct {
type handleRemoteAPIRequest func(request remoteAPIRequest) error
-func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) client.Handler {
+func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) {
var executedRequests = make(map[string]struct{})
return func(requestConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) {
requests := map[string]remoteAPIRequest{}
diff --git a/pkg/fleet/internal/cdn/cdn.go b/pkg/fleet/internal/cdn/cdn.go
index 53d93568777b4..3689ee2e67f24 100644
--- a/pkg/fleet/internal/cdn/cdn.go
+++ b/pkg/fleet/internal/cdn/cdn.go
@@ -73,8 +73,6 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) {
// HACK(baptiste): Create a dedicated one-shot RC service just for the configuration
// We should use the CDN instead
config := pkgconfigsetup.Datadog()
- config.Set("run_path", "/opt/datadog-packages/datadog-installer/stable/run", model.SourceAgentRuntime)
-
detectenv.DetectFeatures(config)
hostname, err := pkghostname.Get(ctx)
if err != nil {
@@ -84,6 +82,7 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) {
remoteconfig.WithAPIKey(c.env.APIKey),
remoteconfig.WithConfigRootOverride(c.env.Site, ""),
remoteconfig.WithDirectorRootOverride(c.env.Site, ""),
+ remoteconfig.WithDatabaseFileName("remote-config-cdn-tmp"),
}
service, err := remoteconfig.NewService(
config,
diff --git a/pkg/kubestatemetrics/builder/builder.go b/pkg/kubestatemetrics/builder/builder.go
index 148318bc1d6fb..56fa8ff3e0c19 100644
--- a/pkg/kubestatemetrics/builder/builder.go
+++ b/pkg/kubestatemetrics/builder/builder.go
@@ -50,6 +50,7 @@ type Builder struct {
collectPodsFromKubelet bool
collectOnlyUnassignedPods bool
+ KubeletReflector *kubeletReflector
}
// New returns new Builder instance
@@ -161,7 +162,17 @@ func (b *Builder) Build() metricsstore.MetricsWriterList {
// BuildStores initializes and registers all enabled stores.
// It returns metric cache stores.
func (b *Builder) BuildStores() [][]cache.Store {
- return b.ksmBuilder.BuildStores()
+ stores := b.ksmBuilder.BuildStores()
+
+ if b.KubeletReflector != nil {
+ // Starting the reflector here allows us to start just one for all stores.
+ err := b.KubeletReflector.start(b.ctx)
+ if err != nil {
+ log.Errorf("Failed to start the kubelet reflector: %s", err)
+ }
+ }
+
+ return stores
}
// WithResync is used if a resync period is configured
@@ -302,7 +313,22 @@ func (c *cacheEnabledListerWatcher) List(options v1.ListOptions) (runtime.Object
func handlePodCollection[T any](b *Builder, store cache.Store, client T, listWatchFunc func(kubeClient T, ns string, fieldSelector string) cache.ListerWatcher, namespace string, useAPIServerCache bool) {
if b.collectPodsFromKubelet {
- b.startKubeletPodWatcher(store, namespace)
+ if b.KubeletReflector == nil {
+ kr, err := newKubeletReflector(b.namespaces)
+ if err != nil {
+ log.Errorf("Failed to create kubeletReflector: %s", err)
+ return
+ }
+ b.KubeletReflector = &kr
+ }
+
+ err := b.KubeletReflector.addStore(store)
+ if err != nil {
+ log.Errorf("Failed to add store to kubeletReflector: %s", err)
+ return
+ }
+
+ // The kubelet reflector will be started when all stores are added.
return
}
diff --git a/pkg/kubestatemetrics/builder/kubelet_pods.go b/pkg/kubestatemetrics/builder/kubelet_pods.go
index ce7af8ce6683c..c0a50018c110a 100644
--- a/pkg/kubestatemetrics/builder/kubelet_pods.go
+++ b/pkg/kubestatemetrics/builder/kubelet_pods.go
@@ -10,6 +10,7 @@ package builder
import (
"context"
"fmt"
+ "slices"
"strings"
"time"
@@ -22,57 +23,107 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/log"
)
-// PodWatcher is an interface for a component that watches for changes in pods
-type PodWatcher interface {
+const (
+ podWatcherExpiryDuration = 15 * time.Second
+ updateStoresPeriod = 5 * time.Second
+)
+
+// podWatcher is an interface for a component that watches for changes in pods
+type podWatcher interface {
PullChanges(ctx context.Context) ([]*kubelet.Pod, error)
Expire() ([]string, error)
}
-func (b *Builder) startKubeletPodWatcher(store cache.Store, namespace string) {
- podWatcher, err := kubelet.NewPodWatcher(15 * time.Second)
+type kubeletReflector struct {
+ namespaces []string
+ watchAllNamespaces bool
+ podWatcher podWatcher
+
+ // Having an array of stores allows us to have a single watcher for all the
+ // collectors configured (by default it's the pods one plus "pods_extended")
+ stores []cache.Store
+
+ started bool
+}
+
+func newKubeletReflector(namespaces []string) (kubeletReflector, error) {
+ watcher, err := kubelet.NewPodWatcher(podWatcherExpiryDuration)
if err != nil {
- log.Warnf("Failed to create pod watcher: %s", err)
+ return kubeletReflector{}, fmt.Errorf("failed to create kubelet-based reflector: %w", err)
+ }
+
+ watchAllNamespaces := slices.Contains(namespaces, corev1.NamespaceAll)
+
+ return kubeletReflector{
+ namespaces: namespaces,
+ watchAllNamespaces: watchAllNamespaces,
+ podWatcher: watcher,
+ }, nil
+}
+
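+// addStore registers a store to be kept in sync with kubelet pod data. All
+// stores must be added before start is called; the builder adds one store per
+// configured pod collector and then starts the reflector once from
+// BuildStores.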
+func (kr *kubeletReflector) addStore(store cache.Store) error {
+ if kr.started {
+ return fmt.Errorf("cannot add store after reflector has started")
}
- ticker := time.NewTicker(5 * time.Second)
+ kr.stores = append(kr.stores, store)
+
+ return nil
+}
+
+// start starts the reflector. It should be called only once after all the
+// stores have been added
+func (kr *kubeletReflector) start(ctx context.Context) error {
+ if kr.started {
+ return fmt.Errorf("reflector already started")
+ }
+
+ kr.started = true
+
+ ticker := time.NewTicker(updateStoresPeriod)
go func() {
for {
select {
case <-ticker.C:
- err = updateStore(b.ctx, store, podWatcher, namespace)
+				err := kr.updateStores(ctx)
if err != nil {
- log.Errorf("Failed to update store: %s", err)
+ log.Errorf("Failed to update stores: %s", err)
}
- case <-b.ctx.Done():
+			case <-ctx.Done():
ticker.Stop()
return
}
}
}()
+
+ return nil
}
-func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, namespace string) error {
- pods, err := podWatcher.PullChanges(ctx)
+func (kr *kubeletReflector) updateStores(ctx context.Context) error {
+ pods, err := kr.podWatcher.PullChanges(ctx)
if err != nil {
return fmt.Errorf("failed to pull changes from pod watcher: %w", err)
}
for _, pod := range pods {
- if namespace != corev1.NamespaceAll && pod.Metadata.Namespace != namespace {
+ if !kr.watchAllNamespaces && !slices.Contains(kr.namespaces, pod.Metadata.Namespace) {
continue
}
kubePod := kubelet.ConvertKubeletPodToK8sPod(pod)
- err = store.Add(kubePod)
- if err != nil {
- log.Warnf("Failed to add pod to KSM store: %s", err)
+ for _, store := range kr.stores {
+ err := store.Add(kubePod)
+ if err != nil {
+ // log instead of returning error to continue updating other stores
+ log.Warnf("Failed to add pod to store: %s", err)
+ }
}
}
- expiredEntities, err := podWatcher.Expire()
+ expiredEntities, err := kr.podWatcher.Expire()
if err != nil {
return fmt.Errorf("failed to expire pods: %w", err)
}
@@ -91,9 +142,12 @@ func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher,
},
}
- err = store.Delete(&expiredPod)
- if err != nil {
- log.Warnf("Failed to delete pod from KSM store: %s", err)
+ for _, store := range kr.stores {
+ err := store.Delete(&expiredPod)
+ if err != nil {
+ // log instead of returning error to continue updating other stores
+ log.Warnf("Failed to delete pod from store: %s", err)
+ }
}
}
diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go
index b4da17ab6227d..7682655232056 100644
--- a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go
+++ b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go
@@ -8,9 +8,24 @@
package builder
import (
+ "context"
+
"k8s.io/client-go/tools/cache"
)
-func (b *Builder) startKubeletPodWatcher(_ cache.Store, _ string) {
- // Do nothing
+// When the Kubelet flag is not set, we don't need a kubeletReflector, so we can
+// return a struct that does nothing
+
+type kubeletReflector struct{}
+
+func newKubeletReflector(_ []string) (kubeletReflector, error) {
+ return kubeletReflector{}, nil
+}
+
+func (kr *kubeletReflector) addStore(_ cache.Store) error {
+ return nil
+}
+
+func (kr *kubeletReflector) start(_ context.Context) error {
+ return nil
}
diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_test.go b/pkg/kubestatemetrics/builder/kubelet_pods_test.go
index 94f5f26a798ee..a9020b2143549 100644
--- a/pkg/kubestatemetrics/builder/kubelet_pods_test.go
+++ b/pkg/kubestatemetrics/builder/kubelet_pods_test.go
@@ -9,10 +9,11 @@ package builder
import (
"context"
+ "slices"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -83,83 +84,140 @@ func (m *MockStore) Resync() error {
return nil
}
-func TestUpdateStore_AddPodToStore(t *testing.T) {
- store := new(MockStore)
- podWatcher := new(MockPodWatcher)
-
- kubeletPod := &kubelet.Pod{
- Metadata: kubelet.PodMetadata{
- Name: "test-pod",
- Namespace: "default",
- UID: "12345",
+func TestUpdateStores_AddPods(t *testing.T) {
+ tests := []struct {
+ name string
+ reflectorNamespaces []string
+ addedPodNamespace string
+ podShouldBeAdded bool
+ }{
+ {
+ name: "add pod in watched namespace",
+ reflectorNamespaces: []string{"default"},
+ addedPodNamespace: "default",
+ podShouldBeAdded: true,
},
- }
-
- kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod)
-
- podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil)
- podWatcher.On("Expire").Return([]string{}, nil)
- store.On("Add", kubernetesPod).Return(nil)
-
- err := updateStore(context.TODO(), store, podWatcher, "default")
- assert.NoError(t, err)
-
- store.AssertCalled(t, "Add", kubernetesPod)
-}
-
-func TestUpdateStore_FilterPodsByNamespace(t *testing.T) {
- store := new(MockStore)
- podWatcher := new(MockPodWatcher)
-
- kubeletPod := &kubelet.Pod{
- Metadata: kubelet.PodMetadata{
- Name: "test-pod",
- Namespace: "other-namespace",
- UID: "12345",
+ {
+ name: "add pod in non-watched namespace",
+ reflectorNamespaces: []string{"default"},
+ addedPodNamespace: "other-namespace",
+ podShouldBeAdded: false,
+ },
+ {
+ name: "reflector watches all pods",
+ reflectorNamespaces: []string{corev1.NamespaceAll},
+ addedPodNamespace: "default",
+ podShouldBeAdded: true,
},
}
- store.On("Add", mock.Anything).Return(nil)
- podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil)
- podWatcher.On("Expire").Return([]string{}, nil)
-
- err := updateStore(context.TODO(), store, podWatcher, "default")
- assert.NoError(t, err)
-
- // Add() shouldn't be called because the pod is in a different namespace
- store.AssertNotCalled(t, "Add", mock.Anything)
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ stores := []*MockStore{
+ new(MockStore),
+ new(MockStore),
+ }
+ for _, store := range stores {
+ store.On("Add", mock.Anything).Return(nil)
+ }
+
+ watcher := new(MockPodWatcher)
+
+ kubeletPod := &kubelet.Pod{
+ Metadata: kubelet.PodMetadata{
+ Namespace: test.addedPodNamespace,
+ Name: "test-pod",
+ UID: "12345",
+ },
+ }
+
+ kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod)
+
+ watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil)
+ watcher.On("Expire").Return([]string{}, nil)
+
+ reflector := kubeletReflector{
+ namespaces: test.reflectorNamespaces,
+ watchAllNamespaces: slices.Contains(test.reflectorNamespaces, corev1.NamespaceAll),
+ podWatcher: watcher,
+ }
+
+ for _, store := range stores {
+ err := reflector.addStore(store)
+ require.NoError(t, err)
+ }
+
+ err := reflector.updateStores(context.TODO())
+ require.NoError(t, err)
+
+ if test.podShouldBeAdded {
+ for _, store := range stores {
+ store.AssertCalled(t, "Add", kubernetesPod)
+ }
+ } else {
+ for _, store := range stores {
+ store.AssertNotCalled(t, "Add", mock.Anything)
+ }
+ }
+ })
+ }
}
-func TestUpdateStore_HandleExpiredPods(t *testing.T) {
- store := new(MockStore)
- podWatcher := new(MockPodWatcher)
- podUID := "kubernetes_pod://pod-12345"
- kubernetesPod := corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- UID: types.UID("pod-12345"),
+func TestUpdateStores_HandleExpired(t *testing.T) {
+ tests := []struct {
+ name string
+ expiredUID string
+ expectedPodToBeDeleted *corev1.Pod
+ }{
+ {
+ name: "expired pod",
+ expiredUID: "kubernetes_pod://pod-12345",
+ expectedPodToBeDeleted: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ UID: types.UID("pod-12345"),
+ },
+ },
+ },
+ {
+ name: "expired container",
+ expiredUID: "container-12345",
+ expectedPodToBeDeleted: nil,
},
}
- podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil)
- podWatcher.On("Expire").Return([]string{podUID}, nil)
- store.On("Delete", &kubernetesPod).Return(nil)
-
- err := updateStore(context.TODO(), store, podWatcher, "default")
- assert.NoError(t, err)
-
- store.AssertCalled(t, "Delete", &kubernetesPod)
-}
-
-func TestUpdateStore_HandleExpiredContainers(t *testing.T) {
- store := new(MockStore)
- podWatcher := new(MockPodWatcher)
-
- podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil)
- podWatcher.On("Expire").Return([]string{"container-12345"}, nil)
-
- err := updateStore(context.TODO(), store, podWatcher, "default")
- assert.NoError(t, err)
-
- // Delete() shouldn't be called because the expired entity is not a pod
- store.AssertNotCalled(t, "Delete", mock.Anything)
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ stores := []*MockStore{
+ new(MockStore),
+ new(MockStore),
+ }
+ for _, store := range stores {
+ store.On("Delete", mock.Anything).Return(nil)
+ }
+
+ watcher := new(MockPodWatcher)
+ watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil)
+ watcher.On("Expire").Return([]string{test.expiredUID}, nil)
+
+ reflector := kubeletReflector{
+ namespaces: []string{"default"},
+ podWatcher: watcher,
+ }
+ for _, store := range stores {
+ err := reflector.addStore(store)
+ require.NoError(t, err)
+ }
+
+ err := reflector.updateStores(context.TODO())
+ require.NoError(t, err)
+
+ for _, store := range stores {
+ if test.expectedPodToBeDeleted != nil {
+ store.AssertCalled(t, "Delete", test.expectedPodToBeDeleted)
+ } else {
+ store.AssertNotCalled(t, "Delete", mock.Anything)
+ }
+ }
+ })
+ }
}
diff --git a/pkg/network/dns/cache_test.go b/pkg/network/dns/cache_test.go
index bb025deec57e8..b56e0359b6591 100644
--- a/pkg/network/dns/cache_test.go
+++ b/pkg/network/dns/cache_test.go
@@ -11,6 +11,7 @@ import (
cryptorand "crypto/rand"
"fmt"
"math/rand"
+ "net/netip"
"strings"
"testing"
"time"
@@ -329,7 +330,7 @@ func randomAddressGen() func() util.Address {
continue
}
- return util.V4AddressFromBytes(b)
+ return util.Address{Addr: netip.AddrFrom4([4]byte(b))}
}
}
}
diff --git a/pkg/network/driver/types_windows.go b/pkg/network/driver/types_windows.go
index 184464cd0965f..1dbeea0cb9197 100644
--- a/pkg/network/driver/types_windows.go
+++ b/pkg/network/driver/types_windows.go
@@ -114,8 +114,8 @@ type PerFlowData struct {
AddressFamily uint16
Protocol uint16
Flags uint32
- LocalAddress [16]uint8
- RemoteAddress [16]uint8
+ LocalAddress [16]byte
+ RemoteAddress [16]byte
PacketsOut uint64
MonotonicSentBytes uint64
TransportBytesOut uint64
diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c
index 857d2f922cde6..a5242b70662f9 100644
--- a/pkg/network/ebpf/c/co-re/tracer-fentry.c
+++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c
@@ -201,7 +201,7 @@ int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset,
return 0;
}
- return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk);
+ return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk);
}
SEC("fexit/tcp_recvmsg")
@@ -262,7 +262,7 @@ static __always_inline int handle_udp_send(struct sock *sk, int sent) {
if (sent > 0) {
log_debug("udp_sendmsg: sent: %d", sent);
- handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk);
+ handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk);
}
bpf_map_delete_elem(&udp_send_skb_args, &pid_tgid);
diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h
index 5148d5a5be43b..823112a4fb7e1 100644
--- a/pkg/network/ebpf/c/protocols/classification/defs.h
+++ b/pkg/network/ebpf/c/protocols/classification/defs.h
@@ -143,10 +143,12 @@ typedef enum {
PROG_HTTP2_EOS_PARSER,
PROG_HTTP2_TERMINATION,
PROG_KAFKA,
- PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0,
- PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12,
- PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0,
- PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12,
+ PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0,
+ PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12,
+ PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0,
+ PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12,
+ PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0,
+ PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9,
PROG_KAFKA_TERMINATION,
PROG_GRPC,
PROG_POSTGRES,
diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h b/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h
index 68140f05390c4..982cdcaed53bc 100644
--- a/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h
+++ b/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h
@@ -283,7 +283,7 @@ static __always_inline bool skip_request_tagged_fields(pktbuf_t pkt, u32 *offset
}
-// Getting the offset (out parameter) of the first topic name in the produce request.
+// Getting the offset (out parameter) of the first topic name in the produce request,
+// and, when out_acks is non-NULL, the required acks value from the request header.
-static __always_inline bool get_topic_offset_from_produce_request(const kafka_header_t *kafka_header, pktbuf_t pkt, u32 *out_offset) {
+static __always_inline bool get_topic_offset_from_produce_request(const kafka_header_t *kafka_header, pktbuf_t pkt, u32 *out_offset, s16 *out_acks) {
const s16 api_version = kafka_header->api_version;
u32 offset = *out_offset;
bool flexible = api_version >= 9;
@@ -310,6 +310,9 @@ static __always_inline bool get_topic_offset_from_produce_request(const kafka_he
// complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
return false;
}
+ if (out_acks != NULL) {
+ *out_acks = acks;
+ }
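+    // Note: when acks == 0 the client does not wait for a produce response,
+    // so callers can use out_acks to tell whether a matching response will
+    // ever arrive on this connection.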
PKTBUF_READ_BIG_ENDIAN_WRAPPER(s32, timeout_ms, pkt, offset);
if (timeout_ms < 0) {
@@ -361,7 +364,7 @@ static __always_inline bool is_kafka_request(const kafka_header_t *kafka_header,
bool flexible = false;
switch (kafka_header->api_key) {
case KAFKA_PRODUCE:
- if (!get_topic_offset_from_produce_request(kafka_header, pkt, &offset)) {
+ if (!get_topic_offset_from_produce_request(kafka_header, pkt, &offset, NULL)) {
return false;
}
flexible = kafka_header->api_version >= 9;
diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h
index 5ee3697c4aa9a..e55ab7954124d 100644
--- a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h
+++ b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h
@@ -480,6 +480,13 @@ static enum parser_level parser_state_to_level(kafka_response_state state)
case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START:
case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS:
case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START:
+
+ case KAFKA_PRODUCE_RESPONSE_START:
+ case KAFKA_PRODUCE_RESPONSE_NUM_TOPICS:
+ case KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE:
+ case KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS:
+ case KAFKA_PRODUCE_RESPONSE_PARTITION_START:
+ case KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START:
return PARSER_LEVEL_PARTITION;
case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START:
case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH:
@@ -494,13 +501,14 @@ static enum parser_level parser_state_to_level(kafka_response_state state)
}
}
-static __always_inline enum parse_result kafka_continue_parse_response_partition_loop(kafka_info_t *kafka,
+static __always_inline enum parse_result kafka_continue_parse_response_partition_loop_fetch(kafka_info_t *kafka,
conn_tuple_t *tup,
kafka_response_context_t *response,
pktbuf_t pkt, u32 offset,
u32 data_end,
u32 api_version)
{
+ extra_debug("Parsing fetch response");
u32 orig_offset = offset;
bool flexible = api_version >= 12;
enum parse_result ret;
@@ -587,18 +595,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition
response->record_batch_length = 0;
}
break;
- case KAFKA_FETCH_RESPONSE_PARTITION_START:
- case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START:
- case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_MAGIC:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_RECORDS_COUNT:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_END:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END:
- case KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS:
- case KAFKA_FETCH_RESPONSE_PARTITION_END:
+ default:
break;
}
@@ -754,12 +751,8 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition
response->state = KAFKA_FETCH_RESPONSE_PARTITION_START;
break;
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_MAGIC:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_RECORDS_COUNT:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCH_END:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END:
+ default:
extra_debug("invalid state %d in partition parser", response->state);
return RET_ERR;
break;
@@ -776,6 +769,133 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition
return RET_LOOP_END;
}
+static __always_inline enum parse_result kafka_continue_parse_response_partition_loop_produce(kafka_info_t *kafka,
+ conn_tuple_t *tup,
+ kafka_response_context_t *response,
+ pktbuf_t pkt, u32 offset,
+ u32 data_end,
+ u32 api_version)
+{
+ extra_debug("Parsing produce response");
+ u32 orig_offset = offset;
+ bool flexible = api_version >= 9;
+ enum parse_result ret;
+
+ extra_debug("carry_over_offset %d", response->carry_over_offset);
+
+ if (response->carry_over_offset < 0) {
+ return RET_ERR;
+ }
+
+ offset += response->carry_over_offset;
+ response->carry_over_offset = 0;
+
+ switch (response->state) {
+ case KAFKA_PRODUCE_RESPONSE_START:
+ extra_debug("KAFKA_PRODUCE_RESPONSE_START");
+ if (flexible) {
+ ret = skip_tagged_fields(response, pkt, &offset, data_end, true);
+ if (ret != RET_DONE) {
+ return ret;
+ }
+ }
+
+ response->state = KAFKA_PRODUCE_RESPONSE_NUM_TOPICS;
+ // fallthrough
+
+ case KAFKA_PRODUCE_RESPONSE_NUM_TOPICS:
+ {
+ extra_debug("KAFKA_PRODUCE_RESPONSE_NUM_TOPICS");
+ s64 num_topics = 0;
+ ret = read_varint_or_s32(flexible, response, pkt, &offset, data_end, &num_topics, true,
+ VARINT_BYTES_NUM_TOPICS);
+ extra_debug("num_topics: %lld", num_topics);
+ if (ret != RET_DONE) {
+ return ret;
+ }
+ if (num_topics <= 0) {
+ return RET_ERR;
+ }
+ }
+ response->state = KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE;
+ // fallthrough
+
+ case KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE:
+ {
+ extra_debug("KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE");
+ s64 topic_name_size = 0;
+ ret = read_varint_or_s16(flexible, response, pkt, &offset, data_end, &topic_name_size, true,
+ VARINT_BYTES_TOPIC_NAME_SIZE);
+ extra_debug("topic_name_size: %lld", topic_name_size);
+ if (ret != RET_DONE) {
+ return ret;
+ }
+ if (topic_name_size <= 0 || topic_name_size > TOPIC_NAME_MAX_ALLOWED_SIZE) {
+ return RET_ERR;
+ }
+ offset += topic_name_size;
+ }
+ response->state = KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS;
+ // fallthrough
+
+ case KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS:
+ {
+ extra_debug("KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS");
+ s64 number_of_partitions = 0;
+ ret = read_varint_or_s32(flexible, response, pkt, &offset, data_end, &number_of_partitions, true,
+ VARINT_BYTES_NUM_PARTITIONS);
+ extra_debug("number_of_partitions: %lld", number_of_partitions);
+ if (ret != RET_DONE) {
+ return ret;
+ }
+ if (number_of_partitions <= 0 || number_of_partitions >= 2) {
+ // We only support a single partition for produce requests at the moment
+ return RET_ERR;
+ }
+ response->partitions_count = number_of_partitions;
+ response->state = KAFKA_PRODUCE_RESPONSE_PARTITION_START;
+
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ switch (response->state) {
+ case KAFKA_PRODUCE_RESPONSE_PARTITION_START:
+ offset += sizeof(s32); // Skip partition_index
+ response->state = KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START;
+ // fallthrough
+
+ case KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START:
+ {
+ // Error codes range from -1 to 119 as per the Kafka protocol specification.
+ // For details, refer to: https://kafka.apache.org/protocol.html#protocol_error_codes
+ s16 error_code = 0;
+ ret = read_with_remainder_s16(response, pkt, &offset, data_end, &error_code, true);
+ if (ret != RET_DONE) {
+ return ret;
+ }
+ if (error_code < -1 || error_code > 119) {
+ extra_debug("invalid error code: %d", error_code);
+ return RET_ERR;
+ }
+ extra_debug("got error code: %d", error_code);
+ response->partition_error_code = error_code;
+ response->transaction.error_code = error_code;
+
+ // No need to continue parsing the produce response, as we got the error now
+ return RET_DONE;
+ }
+ default:
+ break;
+ }
+
+ response->carry_over_offset = offset - orig_offset;
+ return RET_LOOP_END;
+}
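+
+// For reference, a rough sketch of the ProduceResponse layout walked by the
+// parser above (abridged from https://kafka.apache.org/protocol.html; tagged
+// fields and trailing fields are omitted):
+//
+//   ProduceResponse => [responses] ...
+//     responses => topic_name [partition_responses]
+//       partition_responses => partition_index error_code ...
+//
+// Parsing stops at the first partition's error_code, which is all the
+// transaction needs.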
+
static __always_inline enum parse_result kafka_continue_parse_response_record_batches_loop(kafka_info_t *kafka,
conn_tuple_t *tup,
kafka_response_context_t *response,
@@ -966,16 +1086,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_record_ba
}
break;
- case KAFKA_FETCH_RESPONSE_START:
- case KAFKA_FETCH_RESPONSE_NUM_TOPICS:
- case KAFKA_FETCH_RESPONSE_TOPIC_NAME_SIZE:
- case KAFKA_FETCH_RESPONSE_NUM_PARTITIONS:
- case KAFKA_FETCH_RESPONSE_PARTITION_START:
- case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START:
- case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS:
- case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START:
- case KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS:
- case KAFKA_FETCH_RESPONSE_PARTITION_END:
+ default:
extra_debug("invalid state %d in record batches array parser", response->state);
break;
}
@@ -991,7 +1102,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_record_ba
return RET_LOOP_END;
}
-static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *tup, pktbuf_t pkt, kafka_response_state state, u32 api_version)
+static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *tup, pktbuf_t pkt, kafka_response_state state, u32 api_version, u32 api_key)
{
enum parser_level level = parser_state_to_level(state);
// Leave uninitialized to get a compile-time warning if we miss setting it in
@@ -1003,19 +1114,32 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *
switch (level) {
case PARSER_LEVEL_RECORD_BATCH:
if (api_version >= 12) {
- index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12;
+ index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12;
} else {
- index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0;
+ index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0;
}
break;
case PARSER_LEVEL_PARTITION:
default:
- if (api_version >= 12) {
- index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12;
- } else {
- index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0;
+ switch (api_key) {
+ case KAFKA_FETCH:
+ if (api_version >= 12) {
+ index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12;
+ } else {
+ index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0;
+ }
+ break;
+ case KAFKA_PRODUCE:
+ if (api_version >= 9) {
+ index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9;
+ } else {
+ index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0;
+ }
+ break;
+ default:
+ // Shouldn't happen
+ return;
}
- break;
}
bpf_tail_call_compat(ctx, &protocols_progs, index);
break;
@@ -1023,19 +1147,32 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *
switch (level) {
case PARSER_LEVEL_RECORD_BATCH:
if (api_version >= 12) {
- index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12;
+ index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12;
} else {
- index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0;
+ index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0;
}
break;
case PARSER_LEVEL_PARTITION:
default:
- if (api_version >= 12) {
- index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12;
- } else {
- index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0;
+ switch (api_key) {
+ case KAFKA_FETCH:
+ if (api_version >= 12) {
+ index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12;
+ } else {
+ index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0;
+ }
+ break;
+ case KAFKA_PRODUCE:
+ if (api_version >= 9) {
+ index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9;
+ } else {
+ index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0;
+ }
+ break;
+ default:
+ // Shouldn't happen
+ return;
}
- break;
}
bpf_tail_call_compat(ctx, &tls_process_progs, index);
break;
@@ -1053,15 +1190,20 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx
pktbuf_t pkt, u32 offset,
u32 data_end,
enum parser_level level,
- u32 api_version)
+ u32 api_version,
+ u32 api_key)
{
- enum parse_result ret;
+ enum parse_result ret = 0;
if (level == PARSER_LEVEL_PARTITION) {
response->record_batches_arrays_count = 0;
response->record_batches_arrays_idx = 0;
- ret = kafka_continue_parse_response_partition_loop(kafka, tup, response, pkt, offset, data_end, api_version);
+ if (api_key == KAFKA_PRODUCE) {
+ ret = kafka_continue_parse_response_partition_loop_produce(kafka, tup, response, pkt, offset, data_end, api_version);
+ } else if (api_key == KAFKA_FETCH) {
+ ret = kafka_continue_parse_response_partition_loop_fetch(kafka, tup, response, pkt, offset, data_end, api_version);
+ }
extra_debug("partition loop ret %d record_batches_array_count %u partitions_count %u", ret, response->record_batches_arrays_count, response->partitions_count);
// If we have parsed any record batches arrays (message sets), then
@@ -1083,7 +1225,7 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx
}
if (ret == RET_DONE) {
- extra_debug("enqueue, records_count %d", response->transaction.records_count);
+ extra_debug("enqueue, records_count %d, error_code %d", response->transaction.records_count, response->transaction.error_code);
kafka_batch_enqueue_wrapper(kafka, tup, &response->transaction);
return ret;
}
@@ -1194,26 +1336,33 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx
}
static __always_inline void kafka_response_parser(kafka_info_t *kafka, void *ctx, conn_tuple_t *tup, pktbuf_t pkt,
-enum parser_level level, u32 min_api_version, u32 max_api_version) {
+enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) {
kafka_response_context_t *response = bpf_map_lookup_elem(&kafka_response, tup);
if (!response) {
return;
}
u32 api_version = response->transaction.request_api_version;
+ u32 api_key = response->transaction.request_api_key;
+
if (api_version < min_api_version || api_version > max_api_version) {
// Should never happen. This check is there to inform the compiler about
// the bounds of api_version so that it can optimize away branches for versions
// outside the range at compile time.
return;
}
+ if (api_key != target_api_key) {
+ // Should never happen. This check is there to inform the compiler about
+        // the target_api_key so that it can optimize away branches for other keys.
+ return;
+ }
u32 data_off = pktbuf_data_offset(pkt);
u32 data_end = pktbuf_data_end(pkt);
enum parse_result result = kafka_continue_parse_response(ctx, kafka, tup, response, pkt,
data_off, data_end, level,
- api_version);
+ api_version, target_api_key);
switch (result) {
case RET_EOP:
// This packet parsed successfully but more data needed, nothing
@@ -1231,7 +1380,7 @@ enum parser_level level, u32 min_api_version, u32 max_api_version) {
case RET_LOOP_END:
// We ran out of iterations in the loop, but we're not done
// processing this packet, so continue in a self tail call.
- kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version);
+ kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version, response->transaction.request_api_key);
// If we failed (due to exceeding tail calls), at least flush what
// we have.
@@ -1243,7 +1392,7 @@ enum parser_level level, u32 min_api_version, u32 max_api_version) {
}
}
-static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb, enum parser_level level, u32 min_api_version, u32 max_api_version) {
+static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb, enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) {
const __u32 zero = 0;
kafka_info_t *kafka = bpf_map_lookup_elem(&kafka_heap, &zero);
if (kafka == NULL) {
@@ -1256,32 +1405,43 @@ static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb
return 0;
}
- kafka_response_parser(kafka, skb, &tup, pktbuf_from_skb(skb, &skb_info), level, min_api_version, max_api_version);
+ kafka_response_parser(kafka, skb, &tup, pktbuf_from_skb(skb, &skb_info), level, min_api_version, max_api_version, target_api_key);
return 0;
}
-SEC("socket/kafka_response_partition_parser_v0")
-int socket__kafka_response_partition_parser_v0(struct __sk_buff *skb) {
- return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11);
+SEC("socket/kafka_fetch_response_partition_parser_v0")
+int socket__kafka_fetch_response_partition_parser_v0(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH);
+}
+
+SEC("socket/kafka_fetch_response_partition_parser_v12")
+int socket__kafka_fetch_response_partition_parser_v12(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH);
}
-SEC("socket/kafka_response_partition_parser_v12")
-int socket__kafka_response_partition_parser_v12(struct __sk_buff *skb) {
- return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12);
+SEC("socket/kafka_fetch_response_record_batch_parser_v0")
+int socket__kafka_fetch_response_record_batch_parser_v0(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH);
}
-SEC("socket/kafka_response_record_batch_parser_v0")
-int socket__kafka_response_record_batch_parser_v0(struct __sk_buff *skb) {
- return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11);
+SEC("socket/kafka_fetch_response_record_batch_parser_v12")
+int socket__kafka_fetch_response_record_batch_parser_v12(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH);
}
-SEC("socket/kafka_response_record_batch_parser_v12")
-int socket__kafka_response_record_batch_parser_v12(struct __sk_buff *skb) {
- return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12);
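+// Produce responses switch to the flexible (compact) encoding at v9, so the
+// produce parsers are split at v9; fetch responses switch at v12, hence the
+// v0/v12 split above.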
+SEC("socket/kafka_produce_response_partition_parser_v0")
+int socket__kafka_produce_response_partition_parser_v0(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 8, KAFKA_PRODUCE);
}
-static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *ctx, enum parser_level level, u32 min_api_version, u32 max_api_version) {
+SEC("socket/kafka_produce_response_partition_parser_v9")
+int socket__kafka_produce_response_partition_parser_v9(struct __sk_buff *skb) {
+ return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 9, 11, KAFKA_PRODUCE);
+}
+
+static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *ctx, enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) {
const __u32 zero = 0;
kafka_info_t *kafka = bpf_map_lookup_elem(&kafka_heap, &zero);
if (kafka == NULL) {
@@ -1295,29 +1455,39 @@ static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *c
// Put tuple on stack for 4.14.
conn_tuple_t tup = args->tup;
- kafka_response_parser(kafka, ctx, &tup, pktbuf_from_tls(ctx, args), level, min_api_version, max_api_version);
+ kafka_response_parser(kafka, ctx, &tup, pktbuf_from_tls(ctx, args), level, min_api_version, max_api_version, target_api_key);
return 0;
}
-SEC("uprobe/kafka_tls_response_partition_parser_v0")
-int uprobe__kafka_tls_response_partition_parser_v0(struct pt_regs *ctx) {
- return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11);
+SEC("uprobe/kafka_tls_fetch_response_partition_parser_v0")
+int uprobe__kafka_tls_fetch_response_partition_parser_v0(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH);
}
-SEC("uprobe/kafka_tls_response_partition_parser_v12")
-int uprobe__kafka_tls_response_partition_parser_v12(struct pt_regs *ctx) {
- return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 12, 12);
+SEC("uprobe/kafka_tls_fetch_response_partition_parser_v12")
+int uprobe__kafka_tls_fetch_response_partition_parser_v12(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH);
}
-SEC("uprobe/kafka_tls_response_record_batch_parser_v0")
-int uprobe__kafka_tls_response_record_batch_parser_v0(struct pt_regs *ctx) {
- return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11);
+SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v0")
+int uprobe__kafka_tls_fetch_response_record_batch_parser_v0(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH);
}
-SEC("uprobe/kafka_tls_response_record_batch_parser_v12")
-int uprobe__kafka_tls_response_record_batch_parser_v12(struct pt_regs *ctx) {
- return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12);
+SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v12")
+int uprobe__kafka_tls_fetch_response_record_batch_parser_v12(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH);
+}
+
+SEC("uprobe/kafka_tls_produce_response_partition_parser_v0")
+int uprobe__kafka_tls_produce_response_partition_parser_v0(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 8, KAFKA_PRODUCE);
+}
+
+SEC("uprobe/kafka_tls_produce_response_partition_parser_v9")
+int uprobe__kafka_tls_produce_response_partition_parser_v9(struct pt_regs *ctx) {
+ return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 9, 11, KAFKA_PRODUCE);
}
// Gets the next expected TCP sequence in the stream, assuming
@@ -1386,7 +1556,13 @@ static __always_inline bool kafka_process_new_response(void *ctx, conn_tuple_t *
kafka->response.transaction = *request;
bpf_map_delete_elem(&kafka_in_flight, &key);
- kafka->response.state = KAFKA_FETCH_RESPONSE_START;
+ if (request->request_api_key == KAFKA_FETCH) {
+ kafka->response.state = KAFKA_FETCH_RESPONSE_START;
+ } else if (request->request_api_key == KAFKA_PRODUCE) {
+ kafka->response.state = KAFKA_PRODUCE_RESPONSE_START;
+ } else {
+ return false;
+ }
kafka->response.carry_over_offset = offset - orig_offset;
kafka->response.expected_tcp_seq = kafka_get_next_tcp_seq(skb_info);
kafka->response.transaction.response_last_seen = bpf_ktime_get_ns();
@@ -1397,7 +1573,7 @@ static __always_inline bool kafka_process_new_response(void *ctx, conn_tuple_t *
bpf_map_update_elem(&kafka_response, tup, &response_ctx, BPF_ANY);
- kafka_call_response_parser(ctx, tup, pkt, KAFKA_FETCH_RESPONSE_START, kafka->response.transaction.request_api_version);
+ kafka_call_response_parser(ctx, tup, pkt, KAFKA_FETCH_RESPONSE_START, kafka->response.transaction.request_api_version, kafka->response.transaction.request_api_key);
return true;
}
@@ -1407,7 +1583,7 @@ static __always_inline bool kafka_process_response(void *ctx, conn_tuple_t *tup,
response->transaction.response_last_seen = bpf_ktime_get_ns();
if (!skb_info || skb_info->tcp_seq == response->expected_tcp_seq) {
response->expected_tcp_seq = kafka_get_next_tcp_seq(skb_info);
- kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version);
+ kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version, response->transaction.request_api_key);
// It's on the response path, so no need to parse it as a request.
return true;
}
@@ -1491,11 +1667,15 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka
bool flexible = false;
+ s16 produce_required_acks = 0;
switch (kafka_header.api_key) {
case KAFKA_PRODUCE:
- if (!get_topic_offset_from_produce_request(&kafka_header, pkt, &offset)) {
+ if (!get_topic_offset_from_produce_request(&kafka_header, pkt, &offset, &produce_required_acks)) {
return false;
}
+ if (produce_required_acks == 0) {
+ __sync_fetch_and_add(&kafka_tel->produce_no_required_acks, 1);
+ }
flexible = kafka_header.api_version >= 9;
break;
case KAFKA_FETCH:
@@ -1592,6 +1772,7 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka
log_debug("Got number of Kafka produce records <= 0");
return false;
}
+ // We now know the record count, but we'll have to wait for the response to obtain the error code and latency
kafka_transaction->records_count = records_count;
break;
}
@@ -1603,21 +1784,22 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka
return false;
}
- if (kafka_header.api_key == KAFKA_FETCH) {
- // Copy to stack required by 4.14 verifier.
- kafka_transaction_t transaction;
- kafka_transaction_key_t key;
- bpf_memset(&key, 0, sizeof(key));
- bpf_memcpy(&transaction, kafka_transaction, sizeof(transaction));
- key.correlation_id = kafka_header.correlation_id;
- bpf_memcpy(&key.tuple, tup, sizeof(key.tuple));
- // Flip the tuple for the response path.
- flip_tuple(&key.tuple);
- bpf_map_update_elem(&kafka_in_flight, &key, &transaction, BPF_NOEXIST);
+ if (kafka_header.api_key == KAFKA_PRODUCE && produce_required_acks == 0) {
+ // If we have a produce request with required acks set to 0, we can enqueue it immediately, as there will be no produce response.
+ kafka_batch_enqueue_wrapper(kafka, tup, kafka_transaction);
return true;
}
- kafka_batch_enqueue_wrapper(kafka, tup, kafka_transaction);
+ // Copy to stack required by 4.14 verifier.
+ kafka_transaction_t transaction;
+ kafka_transaction_key_t key;
+ bpf_memset(&key, 0, sizeof(key));
+ bpf_memcpy(&transaction, kafka_transaction, sizeof(transaction));
+ key.correlation_id = kafka_header.correlation_id;
+ bpf_memcpy(&key.tuple, tup, sizeof(key.tuple));
+ // Flip the tuple for the response path.
+ flip_tuple(&key.tuple);
+ bpf_map_update_elem(&kafka_in_flight, &key, &transaction, BPF_NOEXIST);
return true;
}
diff --git a/pkg/network/ebpf/c/protocols/kafka/types.h b/pkg/network/ebpf/c/protocols/kafka/types.h
index a1729180174d8..af3a9e82de9c1 100644
--- a/pkg/network/ebpf/c/protocols/kafka/types.h
+++ b/pkg/network/ebpf/c/protocols/kafka/types.h
@@ -60,6 +60,13 @@ typedef enum {
KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END,
KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS,
KAFKA_FETCH_RESPONSE_PARTITION_END,
+
+ KAFKA_PRODUCE_RESPONSE_START,
+ KAFKA_PRODUCE_RESPONSE_NUM_TOPICS,
+ KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE,
+ KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS,
+ KAFKA_PRODUCE_RESPONSE_PARTITION_START,
+ KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START,
} __attribute__ ((packed)) kafka_response_state;
typedef struct kafka_fetch_response_record_batches_array_t {
@@ -113,6 +120,7 @@ typedef struct kafka_info_t {
typedef struct {
// The array topic_name_size_buckets maps a bucket index to the number of occurrences observed for topic name lengths
__u64 topic_name_size_buckets[KAFKA_TELEMETRY_TOPIC_NAME_NUM_OF_BUCKETS];
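+    // produce_no_required_acks counts produce requests observed with required
+    // acks == 0 (fire-and-forget), which never receive a produce response.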
+ __u64 produce_no_required_acks;
} kafka_telemetry_t;
#endif
diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c
index 66482fb92a092..ad7f5fc0ad048 100644
--- a/pkg/network/ebpf/c/tracer.c
+++ b/pkg/network/ebpf/c/tracer.c
@@ -187,7 +187,7 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__udp_sendpage, int sent) {
return 0;
}
- return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, skp);
+ return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, skp);
}
SEC("kprobe/tcp_done")
@@ -420,7 +420,7 @@ static __always_inline int handle_ip6_skb(struct sock *sk, size_t size, struct f
}
log_debug("kprobe/ip6_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size);
- handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk);
+ handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk);
increment_telemetry_count(udp_send_processed);
return 0;
@@ -597,9 +597,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl
log_debug("kprobe/ip_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size);
- // segment count is not currently enabled on prebuilt.
- // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT
- handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk);
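+    // Segment counting is now enabled on prebuilt as well, hence
+    // PACKET_COUNT_INCREMENT below.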
+ handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk);
increment_telemetry_count(udp_send_processed);
return 0;
@@ -767,9 +765,7 @@ static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *ud
bpf_map_delete_elem(udp_sock_map, &pid_tgid);
log_debug("kretprobe/udp_recvmsg: pid_tgid: %llu, return: %d", pid_tgid, copied);
- // segment count is not currently enabled on prebuilt.
- // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT
- handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_NONE, st->sk);
+ handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_INCREMENT, st->sk);
return 0;
}
diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go
index 63dd4b4c6e393..7916d343dcd10 100644
--- a/pkg/network/ebpf/kprobe_types_linux.go
+++ b/pkg/network/ebpf/kprobe_types_linux.go
@@ -77,12 +77,12 @@ type PIDFD struct {
Fd uint32
}
type UDPRecvSock struct {
- Sk *_Ctype_struct_sock
- Msg *_Ctype_struct_msghdr
+ Sk uintptr
+ Msg uintptr
}
type BindSyscallArgs struct {
- Addr *_Ctype_struct_sockaddr
- Sk *_Ctype_struct_sock
+ Addr uintptr
+ Sk uintptr
}
type ProtocolStack struct {
Api uint8
diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go
index 6908b02965048..990c9f40797b7 100644
--- a/pkg/network/event_common.go
+++ b/pkg/network/event_common.go
@@ -10,6 +10,7 @@ package network
import (
"encoding/binary"
"fmt"
+ "net/netip"
"strings"
"time"
@@ -353,10 +354,8 @@ const keyFmt = "p:%d|src:%s:%d|dst:%s:%d|f:%d|t:%d"
// Note: This is only used in /debug/* endpoints
func BeautifyKey(key string) string {
bytesToAddress := func(buf []byte) util.Address {
- if len(buf) == 4 {
- return util.V4AddressFromBytes(buf)
- }
- return util.V6AddressFromBytes(buf)
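+        // netip.AddrFromSlice handles both 4-byte (IPv4) and 16-byte (IPv6)
+        // slices, so the explicit length check is no longer needed.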
+ addr, _ := netip.AddrFromSlice(buf)
+ return util.Address{Addr: addr}
}
raw := []byte(key)
@@ -464,8 +463,8 @@ func generateConnectionKey(c ConnectionStats, buf []byte, useNAT bool) []byte {
buf[n] = uint8(c.Family)<<4 | uint8(c.Type)
n++
- n += laddr.WriteTo(buf[n:]) // 4 or 16 bytes
- n += raddr.WriteTo(buf[n:]) // 4 or 16 bytes
+ n += copy(buf[n:], laddr.AsSlice()) // 4 or 16 bytes
+ n += copy(buf[n:], raddr.AsSlice()) // 4 or 16 bytes
return buf[:n]
}
diff --git a/pkg/network/event_windows.go b/pkg/network/event_windows.go
index ff9bed37d33f9..54ad5399f8c63 100644
--- a/pkg/network/event_windows.go
+++ b/pkg/network/event_windows.go
@@ -8,7 +8,7 @@
package network
import (
- "net"
+ "net/netip"
"syscall"
"github.com/DataDog/datadog-agent/pkg/network/driver"
@@ -57,14 +57,14 @@ func isTCPFlowEstablished(flow *driver.PerFlowData) bool {
return false
}
-func convertV4Addr(addr [16]uint8) util.Address {
+func convertV4Addr(addr [16]byte) util.Address {
// We only read the first 4 bytes for v4 address
- return util.V4AddressFromBytes(addr[:net.IPv4len])
+ return util.Address{Addr: netip.AddrFrom4([4]byte(addr[:]))}
}
-func convertV6Addr(addr [16]uint8) util.Address {
+func convertV6Addr(addr [16]byte) util.Address {
// We read all 16 bytes for v6 address
- return util.V6AddressFromBytes(addr[:net.IPv6len])
+ return util.Address{Addr: netip.AddrFrom16(addr)}
}
// Monotonic values include retransmits and headers, while transport does not. We default to using transport
diff --git a/pkg/network/gateway_lookup_linux.go b/pkg/network/gateway_lookup_linux.go
index 48657b61c1ece..a3131f40fc085 100644
--- a/pkg/network/gateway_lookup_linux.go
+++ b/pkg/network/gateway_lookup_linux.go
@@ -131,7 +131,7 @@ func (g *gatewayLookup) LookupWithIPs(source util.Address, dest util.Address, ne
// if there is no gateway, we don't need to add subnet info
// for gateway resolution in the backend
- if r.Gateway.IsZero() || r.Gateway.IsUnspecified() {
+ if !r.Gateway.IsValid() || r.Gateway.IsUnspecified() {
return nil
}
diff --git a/pkg/network/go/bininspect/pclntab.go b/pkg/network/go/bininspect/pclntab.go
new file mode 100644
index 0000000000000..be68da2ad03bb
--- /dev/null
+++ b/pkg/network/go/bininspect/pclntab.go
@@ -0,0 +1,307 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build linux
+
+package bininspect
+
+import (
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+const (
+ pclntabSectionName = ".gopclntab"
+
+ go116magic = 0xfffffffa
+ go118magic = 0xfffffff0
+ go120magic = 0xfffffff1
+)
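+
+// For reference, the pclntab begins with an 8-byte header, validated in
+// parsePclntab below:
+//
+//   bytes [0:4] magic (selects the version and byte order)
+//   bytes [4:6] two zero bytes
+//   byte  [6]   pc quantum (1, 2 or 4)
+//   byte  [7]   pointer size (4 or 8)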
+
+// version of the pclntab
+type version int
+
+const (
+ verUnknown version = iota
+ ver11
+ ver12
+ ver116
+ ver118
+ ver120
+)
+
+var (
+ // ErrMissingPCLNTABSection is returned when the pclntab section is missing.
+ ErrMissingPCLNTABSection = errors.New("failed to find pclntab section")
+
+ // ErrUnsupportedPCLNTABVersion is returned when the pclntab version is not supported.
+ ErrUnsupportedPCLNTABVersion = errors.New("unsupported pclntab version")
+
+ // ErrFailedToFindAllSymbols is returned when not all symbols were found.
+ ErrFailedToFindAllSymbols = errors.New("failed to find all symbols")
+)
+
+// sectionAccess is a wrapper around elf.Section to provide ReadAt functionality.
+// It is used to read lazily from the pclntab section, since the pclntab is large and we
+// don't want to read it all at once or keep it in memory.
+type sectionAccess struct {
+ section *elf.Section
+ baseOffset int64
+}
+
+// ReadAt reads len(p) bytes from the section starting at the given offset.
+func (s *sectionAccess) ReadAt(outBuffer []byte, offset int64) (int, error) {
+ return s.section.ReadAt(outBuffer, s.baseOffset+offset)
+}
+
+// pclntanSymbolParser is a parser for pclntab symbols.
+// Similar to LineTable struct in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L43
+type pclntanSymbolParser struct {
+ // section is the pclntab section.
+ section *elf.Section
+ // symbolFilter is the filter for the symbols.
+ symbolFilter symbolFilter
+
+ // byteOrderParser is the binary.ByteOrder for the pclntab.
+ byteOrderParser binary.ByteOrder
+ // cachedVersion is the version of the pclntab.
+ cachedVersion version
+ // funcNameTable is the sectionAccess for the function name table.
+ funcNameTable sectionAccess
+ // funcData is the sectionAccess for the function data.
+ funcData sectionAccess
+ // funcTable is the sectionAccess for the function table.
+ funcTable sectionAccess
+ // funcTableSize is the size of the function table.
+ funcTableSize uint32
+ // ptrSize is the size of a pointer in the architecture of the binary.
+ ptrSize uint32
+ // ptrBufferSizeHelper is a buffer for reading pointers of the size ptrSize.
+ ptrBufferSizeHelper []byte
+    // funcNameHelper is a buffer for reading function names, sized to the maximum symbol name length.
+ funcNameHelper []byte
+ // funcTableFieldSize is the size of a field in the function table.
+ funcTableFieldSize int
+ // funcTableBuffer is a buffer for reading fields in the function table.
+ funcTableBuffer []byte
+}
+
+// GetPCLNTABSymbolParser returns the matching symbols from the pclntab section.
+func GetPCLNTABSymbolParser(f *elf.File, symbolFilter symbolFilter) (map[string]*elf.Symbol, error) {
+ section := f.Section(pclntabSectionName)
+ if section == nil {
+ return nil, ErrMissingPCLNTABSection
+ }
+
+ parser := &pclntanSymbolParser{section: section, symbolFilter: symbolFilter}
+
+ if err := parser.parsePclntab(); err != nil {
+ return nil, err
+ }
+ // Late initialization, to prevent allocation if the binary is not supported.
+ _, maxSymbolsSize := symbolFilter.getMinMaxLength()
+ parser.funcNameHelper = make([]byte, maxSymbolsSize)
+ parser.funcTableFieldSize = getFuncTableFieldSize(parser.cachedVersion, int(parser.ptrSize))
+ // Allocate the buffer for reading the function table.
+ // TODO: Do we need 2*funcTableFieldSize?
+ parser.funcTableBuffer = make([]byte, 2*parser.funcTableFieldSize)
+ return parser.getSymbols()
+}
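+
+// A minimal usage sketch (newPrefixSymbolFilter is the filter constructor
+// used by GetAnySymbolWithPrefixPCLNTAB in symbols.go):
+//
+//   f, err := elf.Open("/path/to/go-binary")
+//   if err != nil { ... }
+//   syms, err := GetPCLNTABSymbolParser(f, newPrefixSymbolFilter("crypto/tls.(*Conn).Read", 64))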
+
+// parsePclntab parses the pclntab, setting the version and verifying the header.
+// Based on parsePclnTab in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L194
+func (p *pclntanSymbolParser) parsePclntab() error {
+ p.cachedVersion = ver11
+
+ pclntabHeader := make([]byte, 8)
+ if n, err := p.section.ReadAt(pclntabHeader, 0); err != nil || n != len(pclntabHeader) {
+ return fmt.Errorf("failed to read pclntab header: %w", err)
+ }
+ // Matching the condition https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L216-L220
+ // Check header: 4-byte magic, two zeros, pc quantum, pointer size.
+ if p.section.Size < 16 || pclntabHeader[4] != 0 || pclntabHeader[5] != 0 ||
+ (pclntabHeader[6] != 1 && pclntabHeader[6] != 2 && pclntabHeader[6] != 4) || // pc quantum
+ (pclntabHeader[7] != 4 && pclntabHeader[7] != 8) { // pointer size
+ // TODO: add explicit error message
+ return errors.New("invalid pclntab header")
+ }
+
+ leMagic := binary.LittleEndian.Uint32(pclntabHeader)
+ beMagic := binary.BigEndian.Uint32(pclntabHeader)
+ switch {
+ case leMagic == go116magic:
+ p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver116
+ case beMagic == go116magic:
+ p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver116
+ case leMagic == go118magic:
+ p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver118
+ case beMagic == go118magic:
+ p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver118
+ case leMagic == go120magic:
+ p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver120
+ case beMagic == go120magic:
+ p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver120
+ default:
+ return ErrUnsupportedPCLNTABVersion
+ }
+
+ p.ptrSize = uint32(pclntabHeader[7])
+ p.ptrBufferSizeHelper = make([]byte, p.ptrSize)
+
+ // offset is based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L252
+ offset := func(word uint32) uint64 {
+ off := 8 + word*p.ptrSize
+ if n, err := p.section.ReadAt(p.ptrBufferSizeHelper, int64(off)); err != nil || n != int(p.ptrSize) {
+ return 0
+ }
+ return p.uintptr(p.ptrBufferSizeHelper)
+ }
+
+ switch p.cachedVersion {
+ case ver118, ver120:
+ p.funcTableSize = uint32(offset(0))
+ p.funcNameTable = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(3)),
+ }
+ p.funcData = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(7)),
+ }
+ p.funcTable = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(7)),
+ }
+ case ver116:
+ p.funcTableSize = uint32(offset(0))
+ p.funcNameTable = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(2)),
+ }
+ p.funcData = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(6)),
+ }
+ p.funcTable = sectionAccess{
+ section: p.section,
+ baseOffset: int64(offset(6)),
+ }
+ }
+
+ return nil
+}
+
+// uintptr returns the pointer-sized value encoded at b.
+// The pointer size is dictated by the table being read.
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L186.
+func (p *pclntanSymbolParser) uintptr(b []byte) uint64 {
+ if p.ptrSize == 4 {
+ return uint64(p.byteOrderParser.Uint32(b))
+ }
+ return p.byteOrderParser.Uint64(b)
+}
+
+// getFuncTableFieldSize returns the size of a field in the function table.
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L388-L392
+func getFuncTableFieldSize(version version, ptrSize int) int {
+ if version >= ver118 {
+ return 4
+ }
+ return ptrSize
+}
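+
+// Layout note (as documented in debug/gosym): the function table is a
+// sequence of alternating (entry_pc, func_offset) fields, so getSymbols
+// below reads field 2*i+1 to get the offset of function i's _func structure
+// relative to funcData.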
+
+// getSymbols returns the symbols from the pclntab section that match the symbol filter.
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L300-L329
+func (p *pclntanSymbolParser) getSymbols() (map[string]*elf.Symbol, error) {
+ numWanted := p.symbolFilter.getNumWanted()
+ symbols := make(map[string]*elf.Symbol, numWanted)
+ data := sectionAccess{section: p.section}
+ for currentIdx := uint32(0); currentIdx < p.funcTableSize; currentIdx++ {
+ // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L315
+ _, err := p.funcTable.ReadAt(p.funcTableBuffer, int64((2*currentIdx+1)*uint32(p.funcTableFieldSize)))
+ if err != nil {
+ continue
+ }
+
+ // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L321
+ data.baseOffset = int64(p.uint(p.funcTableBuffer)) + p.funcData.baseOffset
+ funcName := p.funcName(data)
+
+ if funcName == "" {
+ continue
+ }
+ symbols[funcName] = &elf.Symbol{
+ Name: funcName,
+ }
+ if len(symbols) == numWanted {
+ break
+ }
+ }
+ if len(symbols) < numWanted {
+ return symbols, ErrFailedToFindAllSymbols
+ }
+ return symbols, nil
+}
+
+// funcName returns the name of the function found at off.
+func (p *pclntanSymbolParser) funcName(data sectionAccess) string {
+ off := funcNameOffset(p.ptrSize, p.cachedVersion, p.byteOrderParser, data, p.ptrBufferSizeHelper)
+ n, err := p.funcNameTable.ReadAt(p.funcNameHelper, int64(off))
+ if n == 0 || (err != nil && !errors.Is(err, io.EOF)) {
+ return ""
+ }
+ idxToNull := bytes.IndexByte(p.funcNameHelper, 0)
+ if idxToNull == -1 || idxToNull == 0 || idxToNull >= n {
+ return ""
+ }
+
+ if p.symbolFilter.want(string(p.funcNameHelper[:idxToNull])) {
+ return string(p.funcNameHelper[:idxToNull])
+ }
+ return ""
+}
+
+// uint returns the uint stored at b.
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L427-L432
+func (p *pclntanSymbolParser) uint(b []byte) uint64 {
+ if p.funcTableFieldSize == 4 {
+ return uint64(p.byteOrderParser.Uint32(b))
+ }
+ return p.byteOrderParser.Uint64(b)
+}
+
+// funcNameOffset returns the offset of the function name.
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L472-L485
+// We only use this function to get the name of the function (https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L463),
+// so we explicitly set `n = 1` from the original implementation.
+func funcNameOffset(ptrSize uint32, version version, binary binary.ByteOrder, data sectionAccess, helper []byte) uint32 {
+ // In Go 1.18, the struct _func has changed. The original (prior to 1.18) was:
+ // type _func struct {
+ // entry uintptr
+ // nameoff int32
+ // ...
+ // }
+ // In Go 1.18, the struct is:
+ // type _func struct {
+ // entryoff uint32
+ // nameoff int32
+ // ...
+ // }
+ // Thus, to read the nameoff, for Go 1.18 and later, we need to skip the entryoff field (4 bytes).
+ // for Go 1.17 and earlier, We need to skip the sizeof(uintptr) which is ptrSize.
+ off := ptrSize
+ if version >= ver118 {
+ off = 4
+ }
+ // We read only 4 bytes, as the nameoff is an int32.
+ if n, err := data.ReadAt(helper[:4], int64(off)); err != nil || n != 4 {
+ return 0
+ }
+ return binary.Uint32(helper[:4])
+}
diff --git a/pkg/network/go/bininspect/symbols.go b/pkg/network/go/bininspect/symbols.go
index 90dd50c6932d1..910bc37d3ff30 100644
--- a/pkg/network/go/bininspect/symbols.go
+++ b/pkg/network/go/bininspect/symbols.go
@@ -281,3 +281,19 @@ func GetAnySymbolWithPrefix(elfFile *elf.File, prefix string, maxLength int) (*e
// Shouldn't happen
return nil, errors.New("empty symbols map")
}
+
+// GetAnySymbolWithPrefixPCLNTAB returns any one symbol with the given prefix and the
+// specified maximum length from the pclntab section in the ELF file.
+func GetAnySymbolWithPrefixPCLNTAB(elfFile *elf.File, prefix string, maxLength int) (*elf.Symbol, error) {
+ symbols, err := GetPCLNTABSymbolParser(elfFile, newPrefixSymbolFilter(prefix, maxLength))
+ if err != nil {
+ return nil, err
+ }
+
+ for key := range symbols {
+ return symbols[key], nil
+ }
+
+ // Shouldn't happen
+ return nil, errors.New("empty symbols map")
+}
diff --git a/pkg/network/nat.go b/pkg/network/nat.go
index ef5194b23ffd5..e686c65ea7d83 100644
--- a/pkg/network/nat.go
+++ b/pkg/network/nat.go
@@ -14,7 +14,7 @@ func GetNATLocalAddress(c ConnectionStats) (util.Address, uint16) {
localIP := c.Source
localPort := c.SPort
- if c.IPTranslation != nil && !c.IPTranslation.ReplDstIP.IsZero() {
+ if c.IPTranslation != nil && c.IPTranslation.ReplDstIP.IsValid() {
// Fields are flipped
localIP = c.IPTranslation.ReplDstIP
localPort = c.IPTranslation.ReplDstPort
@@ -27,7 +27,7 @@ func GetNATRemoteAddress(c ConnectionStats) (util.Address, uint16) {
remoteIP := c.Dest
remotePort := c.DPort
- if c.IPTranslation != nil && !c.IPTranslation.ReplSrcIP.IsZero() {
+ if c.IPTranslation != nil && c.IPTranslation.ReplSrcIP.IsValid() {
// Fields are flipped
remoteIP = c.IPTranslation.ReplSrcIP
remotePort = c.IPTranslation.ReplSrcPort
diff --git a/pkg/network/netlink/conntracker.go b/pkg/network/netlink/conntracker.go
index 9f066d2f8cb97..885e4f5900b82 100644
--- a/pkg/network/netlink/conntracker.go
+++ b/pkg/network/netlink/conntracker.go
@@ -12,7 +12,6 @@ import (
"context"
"errors"
"fmt"
- "net"
"net/netip"
"sync"
"time"
@@ -194,8 +193,8 @@ func (ctr *realConntracker) GetTranslationForConn(c *network.ConnectionStats) *n
defer ctr.Unlock()
k := connKey{
- src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort),
- dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort),
+ src: netip.AddrPortFrom(c.Source.Addr, c.SPort),
+ dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort),
transport: c.Type,
}
@@ -226,8 +225,8 @@ func (ctr *realConntracker) DeleteTranslation(c *network.ConnectionStats) {
defer ctr.Unlock()
k := connKey{
- src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort),
- dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort),
+ src: netip.AddrPortFrom(c.Source.Addr, c.SPort),
+ dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort),
transport: c.Type,
}
@@ -453,29 +452,13 @@ func IsNAT(c Con) bool {
func formatIPTranslation(tuple *ConTuple) *network.IPTranslation {
return &network.IPTranslation{
- ReplSrcIP: addrFromIP(tuple.Src.Addr()),
- ReplDstIP: addrFromIP(tuple.Dst.Addr()),
+ ReplSrcIP: util.Address{Addr: tuple.Src.Addr().Unmap()},
+ ReplDstIP: util.Address{Addr: tuple.Dst.Addr().Unmap()},
ReplSrcPort: tuple.Src.Port(),
ReplDstPort: tuple.Dst.Port(),
}
}
-func addrFromIP(ip netip.Addr) util.Address {
- if ip.Is6() && !ip.Is4In6() {
- b := ip.As16()
- return util.V6AddressFromBytes(b[:])
- }
- b := ip.As4()
- return util.V4AddressFromBytes(b[:])
-}
-
-func ipFromAddr(a util.Address) netip.Addr {
- if a.Len() == net.IPv6len {
- return netip.AddrFrom16(*(*[16]byte)(a.Bytes()))
- }
- return netip.AddrFrom4(*(*[4]byte)(a.Bytes()))
-}
-
func formatKey(tuple *ConTuple) (k connKey, ok bool) {
ok = true
k.src = tuple.Src
diff --git a/pkg/network/netlink/decoding.go b/pkg/network/netlink/decoding.go
index 003420fb3ca90..aff7d90466fd6 100644
--- a/pkg/network/netlink/decoding.go
+++ b/pkg/network/netlink/decoding.go
@@ -201,12 +201,12 @@ func ipv4(b []byte) (netip.Addr, error) {
if len(b) != 4 {
return netip.Addr{}, fmt.Errorf("invalid IPv4 size")
}
- return netip.AddrFrom4(*(*[4]byte)(b)), nil
+ return netip.AddrFrom4([4]byte(b)), nil
}
func ipv6(b []byte) (netip.Addr, error) {
if len(b) != 16 {
return netip.Addr{}, fmt.Errorf("invalid IPv6 size")
}
- return netip.AddrFrom16(*(*[16]byte)(b)), nil
+ return netip.AddrFrom16([16]byte(b)), nil
}
diff --git a/pkg/network/protocols/ebpf_types.go b/pkg/network/protocols/ebpf_types.go
index 64ab3ae132ba7..9fcf544843b86 100644
--- a/pkg/network/protocols/ebpf_types.go
+++ b/pkg/network/protocols/ebpf_types.go
@@ -47,14 +47,18 @@ const (
ProgramHTTP2Termination ProgramType = C.PROG_HTTP2_TERMINATION
// ProgramKafka is the Golang representation of the C.PROG_KAFKA enum
ProgramKafka ProgramType = C.PROG_KAFKA
- // ProgramKafkaResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum
- ProgramKafkaResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0
- // ProgramKafkaResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum
- ProgramKafkaResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12
- // ProgramKafkaResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum
- ProgramKafkaResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0
- // ProgramKafkaResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum
- ProgramKafkaResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12
+ // ProgramKafkaFetchResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0 enum
+ ProgramKafkaFetchResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0
+ // ProgramKafkaFetchResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12 enum
+ ProgramKafkaFetchResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12
+ // ProgramKafkaFetchResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0 enum
+ ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0
+ // ProgramKafkaFetchResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12 enum
+ ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12
+ // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 enum
+ ProgramKafkaProduceResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0
+ // ProgramKafkaProduceResponsePartitionParserV9 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 enum
+ ProgramKafkaProduceResponsePartitionParserV9 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9
// ProgramKafkaTermination is tail call to process Kafka termination.
ProgramKafkaTermination ProgramType = C.PROG_KAFKA_TERMINATION
// ProgramPostgres is the Golang representation of the C.PROG_POSTGRES enum
diff --git a/pkg/network/protocols/ebpf_types_linux.go b/pkg/network/protocols/ebpf_types_linux.go
index 43ab21bee8667..9cc859e489174 100644
--- a/pkg/network/protocols/ebpf_types_linux.go
+++ b/pkg/network/protocols/ebpf_types_linux.go
@@ -36,25 +36,29 @@ const (
ProgramKafka ProgramType = 0x9
- ProgramKafkaResponsePartitionParserV0 ProgramType = 0xa
+ ProgramKafkaFetchResponsePartitionParserV0 ProgramType = 0xa
- ProgramKafkaResponsePartitionParserV12 ProgramType = 0xb
+ ProgramKafkaFetchResponsePartitionParserV12 ProgramType = 0xb
- ProgramKafkaResponseRecordBatchParserV0 ProgramType = 0xc
+ ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = 0xc
- ProgramKafkaResponseRecordBatchParserV12 ProgramType = 0xd
+ ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = 0xd
- ProgramKafkaTermination ProgramType = 0xe
+ ProgramKafkaProduceResponsePartitionParserV0 ProgramType = 0xe
- ProgramPostgres ProgramType = 0x10
+ ProgramKafkaProduceResponsePartitionParserV9 ProgramType = 0xf
- ProgramPostgresParseMessage ProgramType = 0x11
+ ProgramKafkaTermination ProgramType = 0x10
- ProgramPostgresTermination ProgramType = 0x12
+ ProgramPostgres ProgramType = 0x12
- ProgramRedis ProgramType = 0x13
+ ProgramPostgresParseMessage ProgramType = 0x13
- ProgramRedisTermination ProgramType = 0x14
+ ProgramPostgresTermination ProgramType = 0x14
+
+ ProgramRedis ProgramType = 0x15
+
+ ProgramRedisTermination ProgramType = 0x16
)
type ebpfProtocolType uint16
diff --git a/pkg/network/protocols/http/etw_http_service.go b/pkg/network/protocols/http/etw_http_service.go
index ee913ae4b7c8d..8225553e059d3 100644
--- a/pkg/network/protocols/http/etw_http_service.go
+++ b/pkg/network/protocols/http/etw_http_service.go
@@ -1472,7 +1472,7 @@ func ipAndPortFromTup(tup driver.ConnTupleType, local bool) ([16]uint8, uint16)
}
func ip4format(ip [16]uint8) string {
- ipObj := netip.AddrFrom4(*(*[4]byte)(ip[:4]))
+ ipObj := netip.AddrFrom4([4]byte(ip[:]))
return ipObj.String()
}
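
Editor's note: both this hunk and the requestFragment changes below swap an unsafe-looking pointer cast for Go 1.20's direct slice-to-array conversion, which copies the slice's leading elements and panics if the slice is shorter than the array. A minimal sketch of the two spellings:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	buf := []byte{127, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

	// Go 1.20+: converting a slice to an array copies the first len(array)
	// elements and panics at runtime if the slice is shorter than that.
	addr := netip.AddrFrom4([4]byte(buf[:]))
	fmt.Println(addr) // 127.0.0.1

	// Pre-1.20 code had to go through a slice-to-array-pointer conversion:
	addr2 := netip.AddrFrom4(*(*[4]byte)(buf[:4]))
	fmt.Println(addr2) // same result
}
```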
diff --git a/pkg/network/protocols/http/model_linux.go b/pkg/network/protocols/http/model_linux.go
index 4855bee8a00af..75034b59fb921 100644
--- a/pkg/network/protocols/http/model_linux.go
+++ b/pkg/network/protocols/http/model_linux.go
@@ -110,7 +110,7 @@ func (e *EbpfEvent) String() string {
func requestFragment(fragment []byte) [BufferSize]byte {
if len(fragment) >= BufferSize {
- return *(*[BufferSize]byte)(fragment)
+ return [BufferSize]byte(fragment)
}
var b [BufferSize]byte
copy(b[:], fragment)
diff --git a/pkg/network/protocols/http/statkeeper_test_linux.go b/pkg/network/protocols/http/statkeeper_test_linux.go
index ba8f1aa4308a8..3a4c59dfeb675 100644
--- a/pkg/network/protocols/http/statkeeper_test_linux.go
+++ b/pkg/network/protocols/http/statkeeper_test_linux.go
@@ -25,9 +25,9 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP
event.Http.Response_last_seen = event.Http.Request_started + latencyNS
event.Http.Response_status_code = uint16(code)
event.Http.Request_fragment = requestFragment([]byte(reqFragment))
- event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes()))
+ event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice()))
event.Tuple.Sport = uint16(sourcePort)
- event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes()))
+ event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice()))
event.Tuple.Dport = uint16(destPort)
event.Tuple.Metadata = 1
diff --git a/pkg/network/protocols/http/statkeeper_test_windows.go b/pkg/network/protocols/http/statkeeper_test_windows.go
index ea4fdc13f00bb..8516d667d41dd 100644
--- a/pkg/network/protocols/http/statkeeper_test_windows.go
+++ b/pkg/network/protocols/http/statkeeper_test_windows.go
@@ -25,11 +25,10 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP
tx.Txn.ResponseStatusCode = uint16(code)
tx.RequestFragment = []byte(reqFragment)
- source.WriteTo(tx.Txn.Tup.RemoteAddr[:])
-
+ copy(tx.Txn.Tup.RemoteAddr[:], source.AsSlice())
tx.Txn.Tup.RemotePort = uint16(sourcePort)
- dest.WriteTo(tx.Txn.Tup.LocalAddr[:])
+ copy(tx.Txn.Tup.LocalAddr[:], dest.AsSlice())
tx.Txn.Tup.LocalPort = uint16(destPort)
return &tx
diff --git a/pkg/network/protocols/kafka/kernel_telemetry.go b/pkg/network/protocols/kafka/kernel_telemetry.go
index 034cd2a6cb59a..d607bb0032d51 100644
--- a/pkg/network/protocols/kafka/kernel_telemetry.go
+++ b/pkg/network/protocols/kafka/kernel_telemetry.go
@@ -20,6 +20,9 @@ type kernelTelemetry struct {
// pathSizeBucket Count of topic names sizes divided into buckets.
pathSizeBucket [TopicNameBuckets]*libtelemetry.Counter
+ // produceNoRequiredAcks is the number of produce requests that did not require any acks.
+ produceNoRequiredAcks *libtelemetry.Counter
+
// telemetryLastState represents the latest Kafka eBPF Kernel telemetry observed from the kernel
telemetryLastState RawKernelTelemetry
}
@@ -34,6 +37,8 @@ func newKernelTelemetry() *kernelTelemetry {
kafkaKernelTel.pathSizeBucket[bucketIndex] = metricGroup.NewCounter("path_size_bucket_" + (strconv.Itoa(bucketIndex + 1)))
}
+ kafkaKernelTel.produceNoRequiredAcks = metricGroup.NewCounter("produce_no_required_acks")
+
return kafkaKernelTel
}
@@ -42,8 +47,9 @@ func (t *kernelTelemetry) update(tel *RawKernelTelemetry) {
// We should only add the delta between the current eBPF map state and the last seen eBPF map state
telemetryDelta := tel.Sub(t.telemetryLastState)
for bucketIndex := range t.pathSizeBucket {
- t.pathSizeBucket[bucketIndex].Add(int64(telemetryDelta.Name_size_buckets[bucketIndex]))
+ t.pathSizeBucket[bucketIndex].Add(int64(telemetryDelta.Topic_name_size_buckets[bucketIndex]))
}
+ t.produceNoRequiredAcks.Add(int64(telemetryDelta.Produce_no_required_acks))
// Create a deep copy of the 'tel' parameter to prevent changes from the outer scope affecting the last state
t.telemetryLastState = *tel
}
@@ -51,7 +57,8 @@ func (t *kernelTelemetry) update(tel *RawKernelTelemetry) {
// Sub generates a new RawKernelTelemetry object by subtracting the values of this RawKernelTelemetry object from the other
func (t *RawKernelTelemetry) Sub(other RawKernelTelemetry) *RawKernelTelemetry {
return &RawKernelTelemetry{
- Name_size_buckets: computePathSizeBucketDifferences(t.Name_size_buckets, other.Name_size_buckets),
+ Topic_name_size_buckets: computePathSizeBucketDifferences(t.Topic_name_size_buckets, other.Topic_name_size_buckets),
+ Produce_no_required_acks: t.Produce_no_required_acks - other.Produce_no_required_acks,
}
}
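
Editor's note: update and Sub implement the usual pattern for exporting cumulative kernel counters: the eBPF map reports monotonically increasing totals, so userspace keeps the last snapshot and feeds only the delta into the telemetry counters. A minimal sketch of that pattern (field names are illustrative, mirroring the shape of RawKernelTelemetry):

```go
package main

import "fmt"

// snapshot mimics RawKernelTelemetry: cumulative totals read from an eBPF map.
type snapshot struct {
	buckets [3]uint64
	noAcks  uint64
}

// sub returns the per-interval delta between two cumulative snapshots.
func (s snapshot) sub(prev snapshot) snapshot {
	var d snapshot
	for i := range s.buckets {
		d.buckets[i] = s.buckets[i] - prev.buckets[i]
	}
	d.noAcks = s.noAcks - prev.noAcks
	return d
}

func main() {
	var last snapshot
	for _, cur := range []snapshot{
		{buckets: [3]uint64{5, 0, 1}, noAcks: 2},
		{buckets: [3]uint64{9, 4, 1}, noAcks: 7},
	} {
		delta := cur.sub(last)
		fmt.Printf("add to counters: %+v\n", delta)
		last = cur // arrays are values, so this is already a deep copy
	}
}
```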
diff --git a/pkg/network/protocols/kafka/model_linux.go b/pkg/network/protocols/kafka/model_linux.go
index 25b7c738469c4..225e2120acc19 100644
--- a/pkg/network/protocols/kafka/model_linux.go
+++ b/pkg/network/protocols/kafka/model_linux.go
@@ -70,7 +70,8 @@ RawKernelTelemetry{
"in range [81, 90]": %d,
"in range [91, 255]": %d,
}
-}`, t.Name_size_buckets[0], t.Name_size_buckets[1], t.Name_size_buckets[2], t.Name_size_buckets[3],
- t.Name_size_buckets[4], t.Name_size_buckets[5], t.Name_size_buckets[6], t.Name_size_buckets[7],
- t.Name_size_buckets[8], t.Name_size_buckets[9])
+ "produce no required acks": %d,
+}`, t.Topic_name_size_buckets[0], t.Topic_name_size_buckets[1], t.Topic_name_size_buckets[2], t.Topic_name_size_buckets[3],
+ t.Topic_name_size_buckets[4], t.Topic_name_size_buckets[5], t.Topic_name_size_buckets[6], t.Topic_name_size_buckets[7],
+ t.Topic_name_size_buckets[8], t.Topic_name_size_buckets[9], t.Produce_no_required_acks)
}
diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go
index 85e94c778004d..41d0dc72b3854 100644
--- a/pkg/network/protocols/kafka/protocol.go
+++ b/pkg/network/protocols/kafka/protocol.go
@@ -40,10 +40,12 @@ const (
eventStreamName = "kafka"
filterTailCall = "socket__kafka_filter"
- responsePartitionParserV0TailCall = "socket__kafka_response_partition_parser_v0"
- responsePartitionParserV12TailCall = "socket__kafka_response_partition_parser_v12"
- responseRecordBatchParserV0TailCall = "socket__kafka_response_record_batch_parser_v0"
- responseRecordBatchParserV12TailCall = "socket__kafka_response_record_batch_parser_v12"
+ fetchResponsePartitionParserV0TailCall = "socket__kafka_fetch_response_partition_parser_v0"
+ fetchResponsePartitionParserV12TailCall = "socket__kafka_fetch_response_partition_parser_v12"
+ fetchResponseRecordBatchParserV0TailCall = "socket__kafka_fetch_response_record_batch_parser_v0"
+ fetchResponseRecordBatchParserV12TailCall = "socket__kafka_fetch_response_record_batch_parser_v12"
+ produceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0"
+ produceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9"
dispatcherTailCall = "socket__protocol_dispatcher_kafka"
kafkaHeapMap = "kafka_heap"
@@ -53,10 +55,12 @@ const (
tlsFilterTailCall = "uprobe__kafka_tls_filter"
- tlsResponsePartitionParserV0TailCall = "uprobe__kafka_tls_response_partition_parser_v0"
- tlsResponsePartitionParserV12TailCall = "uprobe__kafka_tls_response_partition_parser_v12"
- tlsResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_response_record_batch_parser_v0"
- tlsResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_response_record_batch_parser_v12"
+ tlsFetchResponsePartitionParserV0TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v0"
+ tlsFetchResponsePartitionParserV12TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v12"
+ tlsFetchResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v0"
+ tlsFetchResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v12"
+ tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0"
+ tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9"
tlsTerminationTailCall = "uprobe__kafka_tls_termination"
tlsDispatcherTailCall = "uprobe__tls_protocol_dispatcher_kafka"
@@ -106,30 +110,44 @@ var Spec = &protocols.ProtocolSpec{
},
{
ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0),
+ Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: responsePartitionParserV0TailCall,
+ EBPFFuncName: fetchResponsePartitionParserV0TailCall,
},
},
{
ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12),
+ Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: responsePartitionParserV12TailCall,
+ EBPFFuncName: fetchResponsePartitionParserV12TailCall,
},
},
{
ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0),
+ Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: responseRecordBatchParserV0TailCall,
+ EBPFFuncName: fetchResponseRecordBatchParserV0TailCall,
},
},
{
ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12),
+ Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: responseRecordBatchParserV12TailCall,
+ EBPFFuncName: fetchResponseRecordBatchParserV12TailCall,
+ },
+ },
+ {
+ ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
+ Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0),
+ ProbeIdentificationPair: manager.ProbeIdentificationPair{
+ EBPFFuncName: produceResponsePartitionParserV0TailCall,
+ },
+ },
+ {
+ ProgArrayName: protocols.ProtocolDispatcherProgramsMap,
+ Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9),
+ ProbeIdentificationPair: manager.ProbeIdentificationPair{
+ EBPFFuncName: produceResponsePartitionParserV9TailCall,
},
},
{
@@ -148,30 +166,44 @@ var Spec = &protocols.ProtocolSpec{
},
{
ProgArrayName: protocols.TLSDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0),
+ Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0),
+ ProbeIdentificationPair: manager.ProbeIdentificationPair{
+ EBPFFuncName: tlsFetchResponsePartitionParserV0TailCall,
+ },
+ },
+ {
+ ProgArrayName: protocols.TLSDispatcherProgramsMap,
+ Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12),
+ ProbeIdentificationPair: manager.ProbeIdentificationPair{
+ EBPFFuncName: tlsFetchResponsePartitionParserV12TailCall,
+ },
+ },
+ {
+ ProgArrayName: protocols.TLSDispatcherProgramsMap,
+ Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: tlsResponsePartitionParserV0TailCall,
+ EBPFFuncName: tlsFetchResponseRecordBatchParserV0TailCall,
},
},
{
ProgArrayName: protocols.TLSDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12),
+ Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: tlsResponsePartitionParserV12TailCall,
+ EBPFFuncName: tlsFetchResponseRecordBatchParserV12TailCall,
},
},
{
ProgArrayName: protocols.TLSDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0),
+ Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: tlsResponseRecordBatchParserV0TailCall,
+ EBPFFuncName: tlsProduceResponsePartitionParserV0TailCall,
},
},
{
ProgArrayName: protocols.TLSDispatcherProgramsMap,
- Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12),
+ Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9),
ProbeIdentificationPair: manager.ProbeIdentificationPair{
- EBPFFuncName: tlsResponseRecordBatchParserV12TailCall,
+ EBPFFuncName: tlsProduceResponsePartitionParserV9TailCall,
},
},
{
diff --git a/pkg/network/protocols/kafka/statkeeper.go b/pkg/network/protocols/kafka/statkeeper.go
index ccd7d0e6416c4..3ef8895d4c201 100644
--- a/pkg/network/protocols/kafka/statkeeper.go
+++ b/pkg/network/protocols/kafka/statkeeper.go
@@ -58,7 +58,7 @@ func (statKeeper *StatKeeper) Process(tx *EbpfTx) {
}
latency := tx.RequestLatency()
- // Currently, we only support measuring latency for fetch operations
+ // Produce requests with acks = 0 do not receive a response, and as a result, have no latency
if key.RequestAPIKey == FetchAPIKey && latency <= 0 {
statKeeper.telemetry.invalidLatency.Add(1)
return
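
Editor's note: the reworded comment captures the protocol rule behind this check: a produce request sent with acks=0 is fire-and-forget, the broker never replies, so there is no response to time; only fetch requests are required to carry a positive latency sample. A minimal sketch of the distinction, using illustrative names rather than the statkeeper's actual fields:

```go
package main

import "fmt"

// hasResponse reports whether a Kafka request can have request/response
// latency at all. API key values are from the Kafka protocol:
// 0 = Produce, 1 = Fetch.
func hasResponse(apiKey int16, requiredAcks int16) bool {
	const (
		produceAPIKey int16 = 0
		fetchAPIKey   int16 = 1
	)
	switch apiKey {
	case fetchAPIKey:
		return true // a fetch always gets a response
	case produceAPIKey:
		return requiredAcks != 0 // acks=0 is fire-and-forget
	default:
		return false
	}
}

func main() {
	fmt.Println(hasResponse(1, 0))  // true: fetch
	fmt.Println(hasResponse(0, 1))  // true: produce, leader ack
	fmt.Println(hasResponse(0, 0))  // false: produce, no acks
	fmt.Println(hasResponse(0, -1)) // true: produce, all ISR acks
}
```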
diff --git a/pkg/network/protocols/kafka/statkeeper_test.go b/pkg/network/protocols/kafka/statkeeper_test.go
index dc99ff01902d3..21170f2ec8d92 100644
--- a/pkg/network/protocols/kafka/statkeeper_test.go
+++ b/pkg/network/protocols/kafka/statkeeper_test.go
@@ -135,9 +135,9 @@ func generateKafkaTransaction(source util.Address, dest util.Address, sourcePort
event.Transaction.Records_count = recordsCount
event.Transaction.Topic_name_size = uint8(len(topicName))
event.Transaction.Topic_name = topicNameFromString([]byte(topicName))
- event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes()))
+ event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice()))
event.Tup.Sport = uint16(sourcePort)
- event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes()))
+ event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice()))
event.Tup.Dport = uint16(destPort)
event.Tup.Metadata = 1
diff --git a/pkg/network/protocols/kafka/types_linux.go b/pkg/network/protocols/kafka/types_linux.go
index cf33022e3dd2f..ca225a50ec3a5 100644
--- a/pkg/network/protocols/kafka/types_linux.go
+++ b/pkg/network/protocols/kafka/types_linux.go
@@ -63,5 +63,6 @@ type KafkaResponseContext struct {
}
type RawKernelTelemetry struct {
- Name_size_buckets [10]uint64
+ Topic_name_size_buckets [10]uint64
+ Produce_no_required_acks uint64
}
diff --git a/pkg/network/protocols/postgres/model_linux_test.go b/pkg/network/protocols/postgres/model_linux_test.go
index f3888f9945c7e..da81378a4928a 100644
--- a/pkg/network/protocols/postgres/model_linux_test.go
+++ b/pkg/network/protocols/postgres/model_linux_test.go
@@ -77,7 +77,7 @@ func BenchmarkExtractTableName(b *testing.B) {
func requestFragment(fragment []byte) [ebpf.BufferSize]byte {
if len(fragment) >= ebpf.BufferSize {
- return *(*[ebpf.BufferSize]byte)(fragment)
+ return [ebpf.BufferSize]byte(fragment)
}
var b [ebpf.BufferSize]byte
copy(b[:], fragment)
diff --git a/pkg/network/protocols/tls/nodejs/testdata/package.json b/pkg/network/protocols/tls/nodejs/testdata/package.json
index d544e62306e42..18ab31594327b 100644
--- a/pkg/network/protocols/tls/nodejs/testdata/package.json
+++ b/pkg/network/protocols/tls/nodejs/testdata/package.json
@@ -1,5 +1,5 @@
{
- "name": "nodejs-https-server",
+ "name": "test@nodejs-https-server",
"dependencies": {
"dd-trace": "^5.21.0"
}
diff --git a/pkg/network/route_cache.go b/pkg/network/route_cache.go
index 3537a4c2678f5..44a675b0f649b 100644
--- a/pkg/network/route_cache.go
+++ b/pkg/network/route_cache.go
@@ -169,10 +169,10 @@ func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool)
func newRouteKey(source, dest util.Address, netns uint32) routeKey {
k := routeKey{netns: netns, source: source, dest: dest}
- switch dest.Len() {
- case 4:
+ switch {
+ case dest.Is4():
k.connFamily = AFINET
- case 16:
+ case dest.Is6():
k.connFamily = AFINET6
}
return k
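
Editor's note: with util.Address now backed by netip.Addr, the family switch keys on Is4()/Is6() rather than a raw byte length. One subtlety: Is4() is false for an IPv4-mapped IPv6 address (::ffff:a.b.c.d), so this switch presumes addresses are stored unmapped, which the Unmap() calls in the conntrack change just below are consistent with. The semantics in isolation:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	v4 := netip.MustParseAddr("127.0.0.1")
	mapped := netip.MustParseAddr("::ffff:127.0.0.1")
	v6 := netip.MustParseAddr("::1")

	fmt.Println(v4.Is4(), v4.Is6())            // true false
	fmt.Println(mapped.Is4(), mapped.Is4In6()) // false true
	fmt.Println(mapped.Unmap().Is4())          // true
	fmt.Println(v6.Is4(), v6.Is6())            // false true
}
```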
diff --git a/pkg/network/tracer/cached_conntrack.go b/pkg/network/tracer/cached_conntrack.go
index 51c691423f840..de4f6cec213f1 100644
--- a/pkg/network/tracer/cached_conntrack.go
+++ b/pkg/network/tracer/cached_conntrack.go
@@ -10,7 +10,6 @@ package tracer
import (
"errors"
"fmt"
- "net"
"net/netip"
"os"
"sync"
@@ -21,7 +20,6 @@ import (
"github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/network/netlink"
- "github.com/DataDog/datadog-agent/pkg/process/util"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -65,13 +63,6 @@ func (cache *cachedConntrack) Exists(c *network.ConnectionStats) (bool, error) {
return cache.exists(c, c.NetNS, int(c.Pid))
}
-func ipFromAddr(a util.Address) netip.Addr {
- if a.Len() == net.IPv6len {
- return netip.AddrFrom16(*(*[16]byte)(a.Bytes()))
- }
- return netip.AddrFrom4(*(*[4]byte)(a.Bytes()))
-}
-
func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, pid int) (bool, error) {
ctrk, err := cache.ensureConntrack(uint64(netns), pid)
if err != nil {
@@ -89,8 +80,8 @@ func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, p
conn := netlink.Con{
Origin: netlink.ConTuple{
- Src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort),
- Dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort),
+ Src: netip.AddrPortFrom(c.Source.Unmap(), c.SPort),
+ Dst: netip.AddrPortFrom(c.Dest.Unmap(), c.DPort),
Proto: protoNumber,
},
}
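
Editor's note: deleting ipFromAddr works because util.Address now is (or wraps) a netip.Addr, and Unmap() already yields the canonical four-byte form netlink expects, composing directly with netip.AddrPortFrom. A small sketch, assuming the source address may arrive in 4-in-6 form off a dual-stack socket:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	src := netip.MustParseAddr("::ffff:10.1.2.3") // as captured from a dual-stack socket
	dst := netip.MustParseAddr("10.1.2.4")

	origin := netip.AddrPortFrom(src.Unmap(), 41000)
	reply := netip.AddrPortFrom(dst.Unmap(), 80) // Unmap is a no-op on plain IPv4
	fmt.Println(origin, reply)                   // 10.1.2.3:41000 10.1.2.4:80
}
```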
diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go
index b2cd928ceddfb..2c467a114aa4b 100644
--- a/pkg/network/tracer/connection/ebpfless_tracer.go
+++ b/pkg/network/tracer/connection/ebpfless_tracer.go
@@ -20,6 +20,7 @@ import (
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
+ ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf"
"github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/network/config"
"github.com/DataDog/datadog-agent/pkg/network/filter"
@@ -213,7 +214,11 @@ func (t *ebpfLessTracer) processConnection(
}
if conn.Type == network.UDP || conn.Monotonic.TCPEstablished > 0 {
- conn.LastUpdateEpoch = uint64(time.Now().UnixNano())
+ var ts int64
+ if ts, err = ddebpf.NowNanoseconds(); err != nil {
+ return fmt.Errorf("error getting last updated timestamp for connection: %w", err)
+ }
+ conn.LastUpdateEpoch = uint64(ts)
t.conns[key] = conn
}
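
Editor's note: switching away from time.Now().UnixNano() matters because eBPF-side timestamps (bpf_ktime_get_ns and friends) come from the kernel's monotonic/boot clock, not the wall clock, so connections stamped in userspace must use the same timebase to stay comparable. A plausible stand-in for what a helper like ddebpf.NowNanoseconds provides (the CLOCK_BOOTTIME choice here is an assumption, not a quote of its actual source):

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// nowNanoseconds returns nanoseconds on the boot-time clock, which keeps
// counting across suspend and lines up with bpf_ktime_get_boot_ns stamps.
// Assumed stand-in for ddebpf.NowNanoseconds, not its actual source.
func nowNanoseconds() (int64, error) {
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_BOOTTIME, &ts); err != nil {
		return 0, err
	}
	return ts.Nano(), nil
}

func main() {
	ns, err := nowNanoseconds()
	if err != nil {
		panic(err)
	}
	fmt.Println("ns since boot:", ns)
}
```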
diff --git a/pkg/network/tracer/connection/util/conn_tracer.go b/pkg/network/tracer/connection/util/conn_tracer.go
index b97bf1272ae65..724e60ec4c4eb 100644
--- a/pkg/network/tracer/connection/util/conn_tracer.go
+++ b/pkg/network/tracer/connection/util/conn_tracer.go
@@ -163,10 +163,10 @@ func ConnStatsToTuple(c *network.ConnectionStats, tup *netebpf.ConnTuple) {
} else {
tup.SetType(netebpf.UDP)
}
- if !c.Source.IsZero() {
+ if c.Source.IsValid() {
tup.Saddr_l, tup.Saddr_h = util.ToLowHigh(c.Source)
}
- if !c.Dest.IsZero() {
+ if c.Dest.IsValid() {
tup.Daddr_l, tup.Daddr_h = util.ToLowHigh(c.Dest)
}
}
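
Editor's note: the IsZero to IsValid flip follows netip.Addr conventions: the zero Addr simply is not a valid address, and IsValid() is the presence check. Note that validity is not the same as being a non-zero IP:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	var unset netip.Addr         // zero value: no address at all
	fmt.Println(unset.IsValid()) // false

	addr := netip.MustParseAddr("0.0.0.0")
	fmt.Println(addr.IsValid())       // true: 0.0.0.0 is a real address
	fmt.Println(addr.IsUnspecified()) // true: ...just an unspecified one
}
```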
diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go
index fded4ce4d70c0..5017167091fca 100644
--- a/pkg/network/tracer/tracer.go
+++ b/pkg/network/tracer/tracer.go
@@ -40,6 +40,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/process/util"
timeresolver "github.com/DataDog/datadog-agent/pkg/security/resolvers/time"
"github.com/DataDog/datadog-agent/pkg/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/ec2"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -850,3 +851,17 @@ func newUSMMonitor(c *config.Config, tracer connection.Tracer) *usm.Monitor {
return monitor
}
+
+// GetNetworkID retrieves the vpc_id (network_id) from IMDS
+func (t *Tracer) GetNetworkID(ctx context.Context) (string, error) {
+ id := ""
+ err := kernel.WithRootNS(kernel.ProcFSRoot(), func() error {
+ var err error
+ id, err = ec2.GetNetworkID(ctx)
+ return err
+ })
+ if err != nil {
+ return "", err
+ }
+ return id, nil
+}
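
Editor's note: GetNetworkID performs the EC2 IMDS lookup from the root network namespace, since a containerized agent's own netns frequently has no route to 169.254.169.254. A contrived but compilable sketch of the call shape; withRootNS and fakeIMDSNetworkID are stand-ins for kernel.WithRootNS and ec2.GetNetworkID, not the real implementations:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// withRootNS stands in for kernel.WithRootNS: run fn from the root network
// namespace, restoring the caller's namespace afterwards. The real helper
// must lock the OS thread, since netns membership is per-thread on Linux.
func withRootNS(fn func() error) error {
	// (namespace switching elided)
	return fn()
}

func fakeIMDSNetworkID(_ context.Context) (string, error) {
	return "vpc-0123456789abcdef0", nil // stand-in for the IMDS response
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var id string
	err := withRootNS(func() error {
		var err error
		id, err = fakeIMDSNetworkID(ctx) // only routable from the root netns
		return err
	})
	fmt.Println(id, err)
}
```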
diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go
index f541f252cf8bf..dfa6036127c77 100644
--- a/pkg/network/tracer/tracer_linux_test.go
+++ b/pkg/network/tracer/tracer_linux_test.go
@@ -1127,31 +1127,61 @@ func (s *TracerSuite) TestSelfConnect() {
}, 5*time.Second, 100*time.Millisecond, "could not find expected number of tcp connections, expected: 2")
}
-func (s *TracerSuite) TestUDPPeekCount() {
- t := s.T()
- config := testConfig()
- tr := setupTracer(t, config)
+// setupUDPSockets sets up two UDP sockets talking to each other locally.
+// It returns (listener, dialer).
+func setupUDPSockets(t *testing.T, udpnet, ip string) (*net.UDPConn, *net.UDPConn) {
+ serverAddr := fmt.Sprintf("%s:%d", ip, 0)
- ln, err := net.ListenPacket("udp", "127.0.0.1:0")
+ laddr, err := net.ResolveUDPAddr(udpnet, serverAddr)
require.NoError(t, err)
- defer ln.Close()
- saddr := ln.LocalAddr().String()
+ var ln, c *net.UDPConn
+ t.Cleanup(func() {
+ if ln != nil {
+ ln.Close()
+ }
+ if c != nil {
+ c.Close()
+ }
+ })
- laddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
+ ln, err = net.ListenUDP(udpnet, laddr)
require.NoError(t, err)
- raddr, err := net.ResolveUDPAddr("udp", saddr)
+
+ saddr := ln.LocalAddr().String()
+
+ raddr, err := net.ResolveUDPAddr(udpnet, saddr)
require.NoError(t, err)
- c, err := net.DialUDP("udp", laddr, raddr)
+ c, err = net.DialUDP(udpnet, laddr, raddr)
require.NoError(t, err)
- defer c.Close()
+
+ return ln, c
+}
+
+func (s *TracerSuite) TestUDPPeekCount() {
+ t := s.T()
+ t.Run("v4", func(t *testing.T) {
+ testUDPPeekCount(t, "udp4", "127.0.0.1")
+ })
+ t.Run("v6", func(t *testing.T) {
+ if !testConfig().CollectUDPv6Conns {
+ t.Skip("UDPv6 disabled")
+ }
+ testUDPPeekCount(t, "udp6", "[::1]")
+ })
+}
+func testUDPPeekCount(t *testing.T, udpnet, ip string) {
+ config := testConfig()
+ tr := setupTracer(t, config)
+
+ ln, c := setupUDPSockets(t, udpnet, ip)
msg := []byte("asdf")
- _, err = c.Write(msg)
+ _, err := c.Write(msg)
require.NoError(t, err)
- rawConn, err := ln.(*net.UDPConn).SyscallConn()
+ rawConn, err := ln.SyscallConn()
require.NoError(t, err)
err = rawConn.Control(func(fd uintptr) {
buf := make([]byte, 1024)
@@ -1204,12 +1234,82 @@ func (s *TracerSuite) TestUDPPeekCount() {
m := outgoing.Monotonic
require.Equal(t, len(msg), int(m.SentBytes))
require.Equal(t, 0, int(m.RecvBytes))
+ require.Equal(t, 1, int(m.SentPackets))
+ require.Equal(t, 0, int(m.RecvPackets))
require.True(t, outgoing.IntraHost)
// make sure the inverse values are seen for the other message
m = incoming.Monotonic
require.Equal(t, 0, int(m.SentBytes))
require.Equal(t, len(msg), int(m.RecvBytes))
+ require.Equal(t, 0, int(m.SentPackets))
+ require.Equal(t, 1, int(m.RecvPackets))
+ require.True(t, incoming.IntraHost)
+}
+
+func (s *TracerSuite) TestUDPPacketSumming() {
+ t := s.T()
+ t.Run("v4", func(t *testing.T) {
+ testUDPPacketSumming(t, "udp4", "127.0.0.1")
+ })
+ t.Run("v6", func(t *testing.T) {
+ if !testConfig().CollectUDPv6Conns {
+ t.Skip("UDPv6 disabled")
+ }
+ testUDPPacketSumming(t, "udp6", "[::1]")
+ })
+}
+func testUDPPacketSumming(t *testing.T, udpnet, ip string) {
+ config := testConfig()
+ tr := setupTracer(t, config)
+
+ ln, c := setupUDPSockets(t, udpnet, ip)
+
+ msg := []byte("asdf")
+ // send UDP packets of increasing length
+ for i := range msg {
+ _, err := c.Write(msg[:i+1])
+ require.NoError(t, err)
+ }
+ expectedBytes := 1 + 2 + 3 + 4
+
+ buf := make([]byte, 256)
+ recvBytes := 0
+ for range msg {
+ n, _, err := ln.ReadFrom(buf)
+ require.NoError(t, err)
+ recvBytes += n
+ }
+ // sanity check: did userspace receive all the bytes from the four packets?
+ require.Equal(t, expectedBytes, recvBytes)
+
+ var incoming *network.ConnectionStats
+ var outgoing *network.ConnectionStats
+ require.Eventuallyf(t, func() bool {
+ conns := getConnections(t, tr)
+ if outgoing == nil {
+ outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns)
+ }
+ if incoming == nil {
+ incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns)
+ }
+
+ return outgoing != nil && incoming != nil
+ }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching")
+
+ m := outgoing.Monotonic
+ require.Equal(t, expectedBytes, int(m.SentBytes))
+ require.Equal(t, 0, int(m.RecvBytes))
+ require.Equal(t, int(len(msg)), int(m.SentPackets))
+ require.Equal(t, 0, int(m.RecvPackets))
+ require.True(t, outgoing.IntraHost)
+
+ // make sure the inverse values are seen for the other message
+ m = incoming.Monotonic
+ require.Equal(t, 0, int(m.SentBytes))
+ require.Equal(t, expectedBytes, int(m.RecvBytes))
+ require.Equal(t, 0, int(m.SentPackets))
+ require.Equal(t, int(len(msg)), int(m.RecvPackets))
require.True(t, incoming.IntraHost)
}
@@ -1507,10 +1607,18 @@ func (s *TracerSuite) TestSendfileRegression() {
}, 3*time.Second, 100*time.Millisecond, "couldn't find connections used by sendfile(2)")
if assert.NotNil(t, outConn, "couldn't find outgoing connection used by sendfile(2)") {
- assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile send data wasn't properly traced")
+ assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile sent bytes weren't properly traced")
+ if connType == network.UDP {
+ assert.Equalf(t, int64(1), int64(outConn.Monotonic.SentPackets), "sendfile UDP should send exactly 1 packet")
+ assert.Equalf(t, int64(0), int64(outConn.Monotonic.RecvPackets), "sendfile outConn shouldn't have any RecvPackets")
+ }
}
if assert.NotNil(t, inConn, "couldn't find incoming connection used by sendfile(2)") {
- assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv data wasn't properly traced")
+ assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv bytes weren't properly traced")
+ if connType == network.UDP {
+ assert.Equalf(t, int64(1), int64(inConn.Monotonic.RecvPackets), "sendfile UDP should recv exactly 1 packet")
+ assert.Equalf(t, int64(0), int64(inConn.Monotonic.SentPackets), "sendfile inConn shouldn't have any SentPackets")
+ }
}
}
@@ -1541,7 +1649,7 @@ func (s *TracerSuite) TestSendfileRegression() {
t.Skip("UDP will fail with prebuilt tracer")
}
- // Start TCP server
+ // Start UDP server
var rcvd int64
server := &UDPServer{
network: "udp" + strings.TrimPrefix(family.String(), "v"),
diff --git a/pkg/network/tracer/tracer_unsupported.go b/pkg/network/tracer/tracer_unsupported.go
index bdb6abdf3dbf5..f3ef15179c0b7 100644
--- a/pkg/network/tracer/tracer_unsupported.go
+++ b/pkg/network/tracer/tracer_unsupported.go
@@ -34,6 +34,11 @@ func (t *Tracer) GetActiveConnections(_ string) (*network.Connections, error) {
return nil, ebpf.ErrNotImplemented
}
+// GetNetworkID is not implemented on this OS for Tracer
+func (t *Tracer) GetNetworkID(_ context.Context) (string, error) {
+ return "", ebpf.ErrNotImplemented
+}
+
// RegisterClient registers the client
func (t *Tracer) RegisterClient(_ string) error {
return ebpf.ErrNotImplemented
diff --git a/pkg/network/tracer/tracer_windows.go b/pkg/network/tracer/tracer_windows.go
index a4677a19c501a..fba6ea78a95b0 100644
--- a/pkg/network/tracer/tracer_windows.go
+++ b/pkg/network/tracer/tracer_windows.go
@@ -309,6 +309,11 @@ func (t *Tracer) DebugDumpProcessCache(_ context.Context) (interface{}, error) {
return nil, ebpf.ErrNotImplemented
}
+// GetNetworkID is not implemented on this OS for Tracer
+func (t *Tracer) GetNetworkID(_ context.Context) (string, error) {
+ return "", ebpf.ErrNotImplemented
+}
+
func newUSMMonitor(c *config.Config, dh driver.Handle) usm.Monitor {
if !c.EnableHTTPMonitoring && !c.EnableNativeTLSMonitoring {
return nil
diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go
index 2862a8600c89b..1c3c5e12702ca 100644
--- a/pkg/network/usm/kafka_monitor_test.go
+++ b/pkg/network/usm/kafka_monitor_test.go
@@ -94,6 +94,11 @@ type kafkaParsingValidationWithErrorCodes struct {
expectedAPIVersionFetch int
}
+type groupInfo struct {
+ numSets int
+ msgs []Message
+}
+
func skipTestIfKernelNotSupported(t *testing.T) {
currKernelVersion, err := kernel.HostVersion()
require.NoError(t, err)
@@ -500,15 +505,15 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b
// Ensure that the other buckets remain unchanged before verifying the expected bucket.
for idx := 0; idx < kafka.TopicNameBuckets; idx++ {
if idx != tt.expectedBucketIndex {
- require.Equal(t, currentRawKernelTelemetry.Name_size_buckets[idx],
- telemetryMap.Name_size_buckets[idx],
+ require.Equal(t, currentRawKernelTelemetry.Topic_name_size_buckets[idx],
+ telemetryMap.Topic_name_size_buckets[idx],
"Expected bucket (%d) to remain unchanged", idx)
}
}
// Verify that the expected bucket contains the correct number of occurrences.
expectedNumberOfOccurrences := fixCount(2) // (1 produce request + 1 fetch request)
- return uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Name_size_buckets[tt.expectedBucketIndex] == telemetryMap.Name_size_buckets[tt.expectedBucketIndex]
+ return uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Topic_name_size_buckets[tt.expectedBucketIndex] == telemetryMap.Topic_name_size_buckets[tt.expectedBucketIndex]
}, time.Second*3, time.Millisecond*100)
// Update the current raw kernel telemetry for the next iteration
@@ -524,19 +529,17 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b
require.NoError(t, proxy.WaitForConnectionReady(unixPath))
cfg := getDefaultTestConfiguration(tls)
- monitor := newKafkaMonitor(t, cfg)
- if tls && cfg.EnableGoTLSSupport {
- utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled)
- }
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Cleanup(func() {
for _, client := range tt.context.clients {
client.Client.Close()
}
- cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager)
})
-
+ monitor := newKafkaMonitor(t, cfg)
+ if tls && cfg.EnableGoTLSSupport {
+ utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled)
+ }
tt.testBody(t, &tt.context, monitor)
})
}
@@ -616,7 +619,7 @@ func appendUint32(dst []byte, u uint32) []byte {
// kmsg doesn't have a ResponseFormatter so we need to add the length
// and the correlation Id ourselves.
-func appendResponse(dst []byte, response kmsg.FetchResponse, correlationID uint32) []byte {
+func appendResponse(dst []byte, response kmsg.Response, correlationID uint32) []byte {
var data []byte
data = response.AppendTo(data)
@@ -645,9 +648,9 @@ type Message struct {
response []byte
}
-func appendMessages(messages []Message, correlationID int, req kmsg.FetchRequest, resp kmsg.FetchResponse) []Message {
+func appendMessages(messages []Message, correlationID int, req kmsg.Request, resp kmsg.Response) []Message {
formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
- data := formatter.AppendRequest(make([]byte, 0), &req, int32(correlationID))
+ data := formatter.AppendRequest(make([]byte, 0), req, int32(correlationID))
respData := appendResponse(make([]byte, 0), resp, uint32(correlationID))
return append(messages,
@@ -932,7 +935,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) {
formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
var msgs []Message
reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55))
- respData := appendResponse(make([]byte, 0), resp, uint32(55))
+ respData := appendResponse(make([]byte, 0), &resp, uint32(55))
msgs = append(msgs, Message{request: reqData})
msgs = append(msgs, Message{response: respData[0:4]})
@@ -955,7 +958,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) {
formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
var msgs []Message
reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55))
- respData := appendResponse(make([]byte, 0), resp, uint32(55))
+ respData := appendResponse(make([]byte, 0), &resp, uint32(55))
msgs = append(msgs, Message{request: reqData})
msgs = append(msgs, Message{response: respData[0:8]})
@@ -978,7 +981,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) {
formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
var msgs []Message
reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55))
- respData := appendResponse(make([]byte, 0), resp, uint32(55))
+ respData := appendResponse(make([]byte, 0), &resp, uint32(55))
msgs = append(msgs, Message{request: reqData})
msgs = append(msgs, Message{response: respData[0:4]})
@@ -1150,7 +1153,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) {
var msgs []Message
if tt.buildMessages == nil {
- msgs = appendMessages(msgs, 99, req, resp)
+ msgs = appendMessages(msgs, 99, &req, &resp)
} else {
msgs = tt.buildMessages(req, resp)
}
@@ -1193,54 +1196,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) {
formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
- type groupInfo struct {
- numSets int
- msgs []Message
- }
-
- var groups []groupInfo
- var info groupInfo
-
- for splitIdx := 0; splitIdx < 500; splitIdx++ {
- reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(splitIdx))
- respData := appendResponse(make([]byte, 0), resp, uint32(splitIdx))
-
- // There is an assumption in the code that there are no splits
- // inside the header.
- minSegSize := 8
-
- segSize := min(minSegSize+splitIdx, len(respData))
- if segSize >= len(respData) {
- break
- }
-
- var msgs []Message
- msgs = append(msgs, Message{request: reqData})
- msgs = append(msgs, Message{response: respData[0:segSize]})
-
- if segSize+8 >= len(respData) {
- msgs = append(msgs, Message{response: respData[segSize:]})
- } else {
- // Three segments tests other code paths than two, for example
- // it will fail if the tcp_seq is not updated in the response
- // parsing continuation path.
- msgs = append(msgs, Message{response: respData[segSize : segSize+8]})
- msgs = append(msgs, Message{response: respData[segSize+8:]})
- }
-
- if info.numSets >= 20 {
- groups = append(groups, info)
- info.numSets = 0
- info.msgs = make([]Message, 0)
- }
-
- info.numSets++
- info.msgs = append(info.msgs, msgs...)
- }
-
- if info.numSets > 0 {
- groups = append(groups, info)
- }
+ groups := getSplitGroups(&req, &resp, formatter)
for groupIdx, group := range groups {
name := fmt.Sprintf("split/%s/group%d", tt.name, groupIdx)
@@ -1303,7 +1259,9 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) {
name string
topic string
buildRequest func(string) kmsg.ProduceRequest
+ buildResponse func(string) kmsg.ProduceResponse
numProducedRecords int
+ errorCode int32
}{
{
name: "basic",
@@ -1323,6 +1281,51 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) {
req := kmsg.NewProduceRequest()
req.SetVersion(int16(apiVersion))
+ req.Acks = 1 // Leader Ack
+ transactionID := "transaction-id"
+ req.TransactionID = &transactionID
+ req.TimeoutMillis = 99999999
+ req.Topics = append(req.Topics, reqTopic)
+
+ return req
+ },
+ buildResponse: func(topic string) kmsg.ProduceResponse {
+ partition := kmsg.NewProduceResponseTopicPartition()
+
+ var partitions []kmsg.ProduceResponseTopicPartition
+ partitions = append(partitions, partition)
+
+ topics := kmsg.NewProduceResponseTopic()
+ topics.Topic = topic
+ topics.Partitions = append(topics.Partitions, partitions...)
+
+ resp := kmsg.NewProduceResponse()
+ resp.SetVersion(int16(apiVersion))
+ resp.ThrottleMillis = 999999999
+ resp.Topics = append(resp.Topics, topics)
+ return resp
+ },
+ numProducedRecords: 2,
+ },
+ {
+ name: "with error code",
+ topic: "test-topic-error",
+ buildRequest: func(topic string) kmsg.ProduceRequest {
+ // Make record batch over 16KiB for larger varint size
+ record := makeRecordWithVal(make([]byte, 10000))
+ records := []kmsg.Record{record, record}
+ recordBatch := makeRecordBatch(records...)
+
+ partition := kmsg.NewProduceRequestTopicPartition()
+ partition.Records = recordBatch.AppendTo(partition.Records)
+
+ reqTopic := kmsg.NewProduceRequestTopic()
+ reqTopic.Partitions = append(reqTopic.Partitions, partition)
+ reqTopic.Topic = topic
+
+ req := kmsg.NewProduceRequest()
+ req.SetVersion(int16(apiVersion))
+ req.Acks = -1 // All ISR Acks
transactionID := "transaction-id"
req.TransactionID = &transactionID
req.TimeoutMillis = 99999999
@@ -1330,7 +1333,25 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) {
return req
},
+ buildResponse: func(topic string) kmsg.ProduceResponse {
+ partition := kmsg.NewProduceResponseTopicPartition()
+ partition.ErrorCode = 1
+
+ var partitions []kmsg.ProduceResponseTopicPartition
+ partitions = append(partitions, partition)
+
+ topics := kmsg.NewProduceResponseTopic()
+ topics.Topic = topic
+ topics.Partitions = append(topics.Partitions, partitions...)
+
+ resp := kmsg.NewProduceResponse()
+ resp.SetVersion(int16(apiVersion))
+ resp.ThrottleMillis = 999999999
+ resp.Topics = append(resp.Topics, topics)
+ return resp
+ },
numProducedRecords: 2,
+ errorCode: 1,
},
}
@@ -1349,9 +1370,9 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) {
cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager)
})
req := tt.buildRequest(tt.topic)
- formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
- data := formatter.AppendRequest(make([]byte, 0), &req, int32(99))
- msgs := []Message{{request: data}}
+ var msgs []Message
+ resp := tt.buildResponse(tt.topic)
+ msgs = appendMessages(msgs, 99, &req, &resp)
can.runClient(msgs)
@@ -1359,9 +1380,79 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) {
expectedNumberOfProduceRequests: tt.numProducedRecords,
expectedAPIVersionProduce: apiVersion,
tlsEnabled: tls,
- }, kafkaSuccessErrorCode)
+ }, tt.errorCode)
})
+
+ req := tt.buildRequest(tt.topic)
+ resp := tt.buildResponse(tt.topic)
+ formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo"))
+
+ groups := getSplitGroups(&req, &resp, formatter)
+
+ for groupIdx, group := range groups {
+ name := fmt.Sprintf("split/%s/group%d", tt.name, groupIdx)
+ t.Run(name, func(t *testing.T) {
+ t.Cleanup(func() {
+ cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager)
+ })
+
+ can.runClient(group.msgs)
+
+ getAndValidateKafkaStats(t, monitor, 1, tt.topic, kafkaParsingValidation{
+ expectedNumberOfProduceRequests: tt.numProducedRecords * group.numSets,
+ expectedAPIVersionProduce: apiVersion,
+ tlsEnabled: tls,
+ }, tt.errorCode)
+ })
+ }
+ }
+}
+
+func getSplitGroups(req kmsg.Request, resp kmsg.Response, formatter *kmsg.RequestFormatter) []groupInfo {
+ var groups []groupInfo
+ var info groupInfo
+
+ for splitIdx := 0; splitIdx < 500; splitIdx++ {
+ reqData := formatter.AppendRequest(make([]byte, 0), req, int32(splitIdx))
+ respData := appendResponse(make([]byte, 0), resp, uint32(splitIdx))
+
+ // There is an assumption in the code that there are no splits
+ // inside the header.
+ minSegSize := 8
+
+ segSize := min(minSegSize+splitIdx, len(respData))
+ if segSize >= len(respData) {
+ break
+ }
+
+ var msgs []Message
+ msgs = append(msgs, Message{request: reqData})
+ msgs = append(msgs, Message{response: respData[0:segSize]})
+
+ if segSize+8 >= len(respData) {
+ msgs = append(msgs, Message{response: respData[segSize:]})
+ } else {
+ // Three segments tests other code paths than two, for example
+ // it will fail if the tcp_seq is not updated in the response
+ // parsing continuation path.
+ msgs = append(msgs, Message{response: respData[segSize : segSize+8]})
+ msgs = append(msgs, Message{response: respData[segSize+8:]})
+ }
+
+ if info.numSets >= 20 {
+ groups = append(groups, info)
+ info.numSets = 0
+ info.msgs = make([]Message, 0)
+ }
+
+ info.numSets++
+ info.msgs = append(info.msgs, msgs...)
+ }
+
+ if info.numSets > 0 {
+ groups = append(groups, info)
}
+ return groups
}
func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() {
@@ -1526,13 +1617,13 @@ func validateProduceFetchCount(t *assert.CollectT, kafkaStats map[kafka.Key]*kaf
continue
}
assert.Equal(t, topicName[:min(len(topicName), 80)], kafkaKey.TopicName)
+ assert.Greater(t, requestStats.FirstLatencySample, float64(1))
switch kafkaKey.RequestAPIKey {
case kafka.ProduceAPIKey:
assert.Equal(t, uint16(validation.expectedAPIVersionProduce), kafkaKey.RequestVersion)
numberOfProduceRequests += requestStats.Count
case kafka.FetchAPIKey:
assert.Equal(t, uint16(validation.expectedAPIVersionFetch), kafkaKey.RequestVersion)
- assert.Greater(t, requestStats.FirstLatencySample, float64(1))
numberOfFetchRequests += requestStats.Count
default:
assert.FailNow(t, "Expecting only produce or fetch kafka requests")
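
Editor's note: generalizing appendResponse/appendMessages from the concrete Fetch types to the kmsg.Request and kmsg.Response interfaces is what lets the produce tests reuse the fetch tests' split-message machinery, and it explains the &req/&resp now appearing at the call sites: the kmsg types satisfy those interfaces on pointer receivers, so the value types do not. The Go rule in isolation (types here are stand-ins, not franz-go's):

```go
package main

import "fmt"

type Response interface {
	AppendTo([]byte) []byte
}

type FetchResponse struct{ payload string }

// Pointer receiver: only *FetchResponse satisfies Response.
func (r *FetchResponse) AppendTo(dst []byte) []byte {
	return append(dst, r.payload...)
}

func encode(resp Response) []byte { return resp.AppendTo(nil) }

func main() {
	resp := FetchResponse{payload: "ok"}
	// encode(resp) would not compile: FetchResponse (the value) lacks AppendTo.
	fmt.Printf("%s\n", encode(&resp))
}
```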
diff --git a/pkg/network/usm/testutil/generic_testutil_builder.go b/pkg/network/usm/testutil/generic_testutil_builder.go
index af5cacfcfcdb8..623aeccb284e5 100644
--- a/pkg/network/usm/testutil/generic_testutil_builder.go
+++ b/pkg/network/usm/testutil/generic_testutil_builder.go
@@ -13,10 +13,14 @@ import (
"path"
)
-// BuildGoBinaryWrapper builds a Go binary and returns the path to it.
+const (
+ baseLDFlags = "-ldflags=-extldflags '-static'"
+)
+
+// buildGoBinary builds a Go binary and returns the path to it.
// If the binary is already built (mainly in the CI), it returns the
// path to the binary.
-func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) {
+func buildGoBinary(curDir, binaryDir, buildFlags string) (string, error) {
serverSrcDir := path.Join(curDir, binaryDir)
cachedServerBinaryPath := path.Join(serverSrcDir, binaryDir)
@@ -26,7 +30,7 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) {
return cachedServerBinaryPath, nil
}
- c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test", "-ldflags=-extldflags '-static'", "-o", cachedServerBinaryPath, serverSrcDir)
+ c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test,netgo", buildFlags, "-o", cachedServerBinaryPath, serverSrcDir)
out, err := c.CombinedOutput()
if err != nil {
return "", fmt.Errorf("could not build unix transparent proxy server test binary: %s\noutput: %s", err, string(out))
@@ -34,3 +38,17 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) {
return cachedServerBinaryPath, nil
}
+
+// BuildGoBinaryWrapper builds a Go binary and returns the path to it.
+// If the binary is already built (mainly in the CI), it returns the
+// path to the binary.
+func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) {
+ return buildGoBinary(curDir, binaryDir, baseLDFlags)
+}
+
+// BuildGoBinaryWrapperWithoutSymbols builds a Go binary without symbols and returns the path to it.
+// If the binary is already built (mainly in the CI), it returns the
+// path to the binary.
+func BuildGoBinaryWrapperWithoutSymbols(curDir, binaryDir string) (string, error) {
+ return buildGoBinary(curDir, binaryDir, baseLDFlags+" -s -w")
+}
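
Editor's note: the no-symbols variant only changes the linker flags: -s drops the symbol table and -w drops DWARF, yielding a stripped static binary for exercising code paths that must cope with symbol-less targets. Roughly the command the helper ends up running (directory names here are illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// -s drops the symbol table, -w drops DWARF; appended to the base
	// static-link flags exactly as BuildGoBinaryWrapperWithoutSymbols does.
	ldflags := "-ldflags=-extldflags '-static'" + " -s -w"

	cmd := exec.Command("go", "build",
		"-buildvcs=false", "-a", "-tags=test,netgo", ldflags,
		"-o", "/tmp/testbin/server", "./testdata/server") // paths illustrative
	fmt.Println(cmd.String())
}
```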
diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go
index 940ea9a7344e7..2e0fabbc88eec 100644
--- a/pkg/networkpath/traceroute/runner.go
+++ b/pkg/networkpath/traceroute/runner.go
@@ -20,6 +20,7 @@ import (
"github.com/vishvananda/netns"
telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
"github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/tcp"
@@ -32,15 +33,18 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/log"
)
-// TODO: are these good defaults?
const (
- DefaultSourcePort = 12345
- DefaultDestPort = 33434
- DefaultNumPaths = 1
- DefaultMinTTL = 1
- DefaultMaxTTL = 30
- DefaultDelay = 50 //msec
- DefaultReadTimeout = 10 * time.Second
+ // DefaultSourcePort defines the default source port
+ DefaultSourcePort = 12345
+ // DefaultDestPort defines the default destination port
+ DefaultDestPort = 33434
+ // DefaultNumPaths defines the default number of paths
+ DefaultNumPaths = 1
+ // DefaultMinTTL defines the default minimum TTL
+ DefaultMinTTL = 1
+ // DefaultDelay defines the default delay
+ DefaultDelay = 50 //msec
+ // DefaultOutputFormat defines the default output format
DefaultOutputFormat = "json"
tracerouteRunnerModuleName = "traceroute_runner__"
@@ -111,14 +115,14 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.Network
maxTTL := cfg.MaxTTL
if maxTTL == 0 {
- maxTTL = DefaultMaxTTL
+ maxTTL = setup.DefaultNetworkPathMaxTTL
}
var timeout time.Duration
- if cfg.TimeoutMs == 0 {
- timeout = DefaultReadTimeout
+ if cfg.Timeout == 0 {
+ timeout = setup.DefaultNetworkPathTimeout * time.Duration(maxTTL) * time.Millisecond
} else {
- timeout = time.Duration(cfg.TimeoutMs) * time.Millisecond
+ timeout = cfg.Timeout
}
hname, err := hostname.Get(ctx)
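
Editor's note: the fallback timeout is now derived from the hop budget instead of the flat DefaultReadTimeout: per-hop milliseconds times the maximum TTL. The tcpv4.go hunk further down is consistent with this, handing each TTL probe the full configured Timeout rather than dividing it across hops. A worked example, assuming a per-hop default of 1000 ms (an assumption for illustration; the real constant lives in pkg/config/setup):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed ms-per-hop value; stands in for setup.DefaultNetworkPathTimeout.
	const defaultNetworkPathTimeout = 1000
	maxTTL := 30

	timeout := defaultNetworkPathTimeout * time.Duration(maxTTL) * time.Millisecond
	fmt.Println(timeout) // 30s under the assumed per-hop value
}
```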
diff --git a/pkg/networkpath/traceroute/runner_test.go b/pkg/networkpath/traceroute/runner_test.go
index c3ef189cca095..86f7b99f9c68a 100644
--- a/pkg/networkpath/traceroute/runner_test.go
+++ b/pkg/networkpath/traceroute/runner_test.go
@@ -6,8 +6,9 @@
package traceroute
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestGetPorts(t *testing.T) {
diff --git a/pkg/networkpath/traceroute/tcp/tcpv4.go b/pkg/networkpath/traceroute/tcp/tcpv4.go
index 6b5e9b8db94a0..23f3c45950689 100644
--- a/pkg/networkpath/traceroute/tcp/tcpv4.go
+++ b/pkg/networkpath/traceroute/tcp/tcpv4.go
@@ -103,15 +103,9 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) {
// hops should be of length # of hops
hops := make([]*Hop, 0, t.MaxTTL-t.MinTTL)
- // TODO: better logic around timeout for sequential is needed
- // right now we're just hacking around the existing
- // need to convert uint8 to int for proper conversion to
- // time.Duration
- timeout := t.Timeout / time.Duration(int(t.MaxTTL-t.MinTTL))
-
for i := int(t.MinTTL); i <= int(t.MaxTTL); i++ {
seqNumber := rand.Uint32()
- hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, timeout)
+ hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, t.Timeout)
if err != nil {
return nil, fmt.Errorf("failed to run traceroute: %w", err)
}
diff --git a/pkg/networkpath/traceroute/traceroute.go b/pkg/networkpath/traceroute/traceroute.go
index e23ac10ca64f3..5fb757fc88f71 100644
--- a/pkg/networkpath/traceroute/traceroute.go
+++ b/pkg/networkpath/traceroute/traceroute.go
@@ -8,6 +8,7 @@ package traceroute
import (
"context"
+ "time"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
)
@@ -30,7 +31,7 @@ type (
// Max number of hops to try
MaxTTL uint8
// TODO: do we want to expose this?
- TimeoutMs uint
+ Timeout time.Duration
// Protocol is the protocol to use
// for traceroute, default is UDP
Protocol payload.Protocol
diff --git a/pkg/networkpath/traceroute/traceroute_linux.go b/pkg/networkpath/traceroute/traceroute_linux.go
index dd97f341c0a73..1e4e0f10321cb 100644
--- a/pkg/networkpath/traceroute/traceroute_linux.go
+++ b/pkg/networkpath/traceroute/traceroute_linux.go
@@ -47,7 +47,7 @@ func (l *LinuxTraceroute) Run(_ context.Context) (payload.NetworkPath, error) {
return payload.NetworkPath{}, err
}
- resp, err := tu.GetTraceroute(clientID, l.cfg.DestHostname, l.cfg.DestPort, l.cfg.Protocol, l.cfg.MaxTTL, l.cfg.TimeoutMs)
+ resp, err := tu.GetTraceroute(clientID, l.cfg.DestHostname, l.cfg.DestPort, l.cfg.Protocol, l.cfg.MaxTTL, l.cfg.Timeout)
if err != nil {
return payload.NetworkPath{}, err
}
diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go
index d84089ca5a752..f6e1702121b3b 100644
--- a/pkg/networkpath/traceroute/traceroute_windows.go
+++ b/pkg/networkpath/traceroute/traceroute_windows.go
@@ -46,7 +46,7 @@ func (w *WindowsTraceroute) Run(_ context.Context) (payload.NetworkPath, error)
log.Warnf("could not initialize system-probe connection: %s", err.Error())
return payload.NetworkPath{}, err
}
- resp, err := tu.GetTraceroute(clientID, w.cfg.DestHostname, w.cfg.DestPort, w.cfg.Protocol, w.cfg.MaxTTL, w.cfg.TimeoutMs)
+ resp, err := tu.GetTraceroute(clientID, w.cfg.DestHostname, w.cfg.DestPort, w.cfg.Protocol, w.cfg.MaxTTL, w.cfg.Timeout)
if err != nil {
return payload.NetworkPath{}, err
}
diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod
index 80da0db8971b4..e25602108c4fb 100644
--- a/pkg/obfuscate/go.mod
+++ b/pkg/obfuscate/go.mod
@@ -4,7 +4,7 @@ go 1.22.0
require (
github.com/DataDog/datadog-go/v5 v5.5.0
- github.com/DataDog/go-sqllexer v0.0.13
+ github.com/DataDog/go-sqllexer v0.0.14
github.com/outcaste-io/ristretto v0.2.1
github.com/stretchr/testify v1.9.0
go.uber.org/atomic v1.10.0
diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum
index b0abab16649ad..e6d91aba3fc14 100644
--- a/pkg/obfuscate/go.sum
+++ b/pkg/obfuscate/go.sum
@@ -1,7 +1,7 @@
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go
index 9b0f5969e55f5..f6b7005c46342 100644
--- a/pkg/obfuscate/sql_test.go
+++ b/pkg/obfuscate/sql_test.go
@@ -2403,6 +2403,20 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) {
Procedures: []string{},
},
},
+ {
+ name: "select with cte",
+ query: "WITH users AS (SELECT * FROM people) SELECT * FROM users where id = 1",
+ expected: "WITH users AS ( SELECT * FROM people ) SELECT * FROM users where id = ?",
+ metadata: SQLMetadata{
+ Size: 12,
+ TablesCSV: "people",
+ Commands: []string{
+ "SELECT",
+ },
+ Comments: []string{},
+ Procedures: []string{},
+ },
+ },
}
for _, tt := range tests {
@@ -2589,6 +2603,20 @@ func TestSQLLexerNormalization(t *testing.T) {
Procedures: []string{},
},
},
+ {
+ name: "select with cte",
+ query: "WITH users AS (SELECT * FROM people) SELECT * FROM users",
+ expected: "WITH users AS ( SELECT * FROM people ) SELECT * FROM users",
+ metadata: SQLMetadata{
+ Size: 12,
+ TablesCSV: "people",
+ Commands: []string{
+ "SELECT",
+ },
+ Comments: []string{},
+ Procedures: []string{},
+ },
+ },
}
for _, tt := range tests {
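
Editor's note: the new cases pin down CTE handling under go-sqllexer v0.0.14: only the base table (people) is reported in TablesCSV, the WITH alias (users) is not, and literals are still replaced by ?. For manual experimentation, a hedged sketch of driving the obfuscator directly; the exact normalized output depends on the mode options the tests configure, which are omitted here:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{})
	oq, err := o.ObfuscateSQLString(
		"WITH users AS (SELECT * FROM people) SELECT * FROM users where id = 1")
	if err != nil {
		panic(err)
	}
	fmt.Println(oq.Query) // literals replaced: ... where id = ?
}
```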
diff --git a/pkg/process/checks/checks.go b/pkg/process/checks/checks.go
index 139abb92720dd..b7aabbd8c794e 100644
--- a/pkg/process/checks/checks.go
+++ b/pkg/process/checks/checks.go
@@ -35,6 +35,8 @@ type SysProbeConfig struct {
SystemProbeAddress string
// System probe process module on/off configuration
ProcessModuleEnabled bool
+ // System probe network_tracer module on/off configuration
+ NetworkTracerModuleEnabled bool
}
// Check is an interface for Agent checks that collect data. Each check returns
diff --git a/pkg/process/checks/container.go b/pkg/process/checks/container.go
index de3e40fd00b43..1e4187d46391c 100644
--- a/pkg/process/checks/container.go
+++ b/pkg/process/checks/container.go
@@ -6,7 +6,6 @@
package checks
import (
- "context"
"fmt"
"math"
"sync"
@@ -16,9 +15,9 @@ import (
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
ddconfig "github.com/DataDog/datadog-agent/pkg/config"
+ "github.com/DataDog/datadog-agent/pkg/process/net"
"github.com/DataDog/datadog-agent/pkg/process/statsd"
proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers"
- "github.com/DataDog/datadog-agent/pkg/util/cloudproviders"
"github.com/DataDog/datadog-agent/pkg/util/flavor"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -53,11 +52,21 @@ type ContainerCheck struct {
}
// Init initializes a ContainerCheck instance.
-func (c *ContainerCheck) Init(_ *SysProbeConfig, info *HostInfo, _ bool) error {
+func (c *ContainerCheck) Init(syscfg *SysProbeConfig, info *HostInfo, _ bool) error {
c.containerProvider = proccontainers.GetSharedContainerProvider(c.wmeta)
c.hostInfo = info
- networkID, err := cloudproviders.GetNetworkID(context.TODO())
+ var tu *net.RemoteSysProbeUtil
+ var err error
+ if syscfg.NetworkTracerModuleEnabled {
+ // Calling the remote tracer will cause it to initialize and check connectivity
+ tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress)
+ if err != nil {
+ log.Warnf("could not initiate connection with system probe: %s", err)
+ }
+ }
+
+ networkID, err := retryGetNetworkID(tu)
if err != nil {
log.Infof("no network ID detected: %s", err)
}
diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go
index 26e01d0677061..5396fdd26c10f 100644
--- a/pkg/process/checks/net.go
+++ b/pkg/process/checks/net.go
@@ -107,7 +107,7 @@ func (c *ConnectionsCheck) Init(syscfg *SysProbeConfig, hostInfo *HostInfo, _ bo
}
}
- networkID, err := cloudproviders.GetNetworkID(context.TODO())
+ networkID, err := retryGetNetworkID(tu)
if err != nil {
log.Infof("no network ID detected: %s", err)
}
@@ -503,3 +503,17 @@ func convertAndEnrichWithServiceCtx(tags []string, tagOffsets []uint32, serviceC
return tagsStr
}
+
+// retryGetNetworkID fetches the network_id from the current netNS, falling back to the system probe (which performs the lookup from the root netNS) if necessary
+func retryGetNetworkID(sysProbeUtil *net.RemoteSysProbeUtil) (string, error) {
+ networkID, err := cloudproviders.GetNetworkID(context.TODO())
+ if err != nil && sysProbeUtil != nil {
+ log.Infof("no network ID detected. retrying via system-probe: %s", err)
+ networkID, err = sysProbeUtil.GetNetworkID()
+ if err != nil {
+ log.Infof("failed to get network ID from system-probe: %s", err)
+ return "", err
+ }
+ }
+ return networkID, err
+}
diff --git a/pkg/process/checks/process.go b/pkg/process/checks/process.go
index f35e71704d1cb..26685da9e15aa 100644
--- a/pkg/process/checks/process.go
+++ b/pkg/process/checks/process.go
@@ -6,7 +6,6 @@
package checks
import (
- "context"
"errors"
"fmt"
"math"
@@ -28,7 +27,6 @@ import (
"github.com/DataDog/datadog-agent/pkg/process/statsd"
"github.com/DataDog/datadog-agent/pkg/process/util"
proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers"
- "github.com/DataDog/datadog-agent/pkg/util/cloudproviders"
"github.com/DataDog/datadog-agent/pkg/util/flavor"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/util/subscriptions"
@@ -137,7 +135,17 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool
p.notInitializedLogLimit = log.NewLogLimit(1, time.Minute*10)
- networkID, err := cloudproviders.GetNetworkID(context.TODO())
+ var tu *net.RemoteSysProbeUtil
+ var err error
+ if syscfg.NetworkTracerModuleEnabled {
+ // Calling the remote tracer will cause it to initialize and check connectivity
+ tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress)
+ if err != nil {
+ log.Warnf("could not initiate connection with system probe: %s", err)
+ }
+ }
+
+ networkID, err := retryGetNetworkID(tu)
if err != nil {
log.Infof("no network ID detected: %s", err)
}
diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go
index 4ccf712cbd804..2480dc1d69f84 100644
--- a/pkg/process/net/common.go
+++ b/pkg/process/net/common.go
@@ -44,6 +44,7 @@ type Conn interface {
const (
contentTypeProtobuf = "application/protobuf"
+ contentTypeJSON = "application/json"
)
var (
@@ -58,10 +59,10 @@ type RemoteSysProbeUtil struct {
// Retrier used to setup system probe
initRetry retry.Retrier
- path string
- httpClient http.Client
- pprofClient http.Client
- extendedTimeoutClient http.Client
+ path string
+ httpClient http.Client
+ pprofClient http.Client
+ tracerouteClient http.Client
}
// GetRemoteSystemProbeUtil returns a ready to use RemoteSysProbeUtil. It is backed by a shared singleton.
@@ -166,6 +167,32 @@ func (r *RemoteSysProbeUtil) GetConnections(clientID string) (*model.Connections
return conns, nil
}
+// GetNetworkID fetches the network_id (vpc_id) from system-probe
+func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) {
+ req, err := http.NewRequest("GET", networkIDURL, nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to create request: %w", err)
+ }
+
+ req.Header.Set("Accept", "text/plain")
+ resp, err := r.httpClient.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("failed to execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("network_id request failed: url: %s, status code: %d", networkIDURL, resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ return string(body), nil
+}
+
// GetPing returns the results of a ping to a host
func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, interval time.Duration, timeout time.Duration) ([]byte, error) {
req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s?client_id=%s&count=%d&interval=%d&timeout=%d", pingURL, host, clientID, count, interval, timeout), nil)
@@ -173,7 +200,7 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in
return nil, err
}
- req.Header.Set("Accept", "application/json")
+ req.Header.Set("Accept", contentTypeJSON)
resp, err := r.httpClient.Do(req)
if err != nil {
return nil, err
@@ -199,14 +226,19 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in
}
// GetTraceroute returns the results of a traceroute to a host
-func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout uint) ([]byte, error) {
- req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil)
+func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout time.Duration) ([]byte, error) {
+	httpTimeout := timeout*time.Duration(maxTTL) + 10*time.Second // full worst-case traceroute duration (up to maxTTL hops, each waiting up to timeout), plus headroom for system-probe communication overhead
+ log.Tracef("Network Path traceroute HTTP request timeout: %s", httpTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), httpTimeout)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil)
if err != nil {
return nil, err
}
- req.Header.Set("Accept", "application/json")
- resp, err := r.extendedTimeoutClient.Do(req)
+ req.Header.Set("Accept", contentTypeJSON)
+ resp, err := r.tracerouteClient.Do(req)
if err != nil {
return nil, err
}
@@ -303,17 +335,13 @@ func newSystemProbe(path string) *RemoteSysProbeUtil {
},
},
},
- extendedTimeoutClient: http.Client{
- Timeout: 60 * time.Second,
+ tracerouteClient: http.Client{
+ // no timeout set here, the expected usage of this client
+ // is that the caller will set a timeout on each request
Transport: &http.Transport{
- MaxIdleConns: 2,
- IdleConnTimeout: 30 * time.Second,
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial(netType, path)
},
- TLSHandshakeTimeout: 1 * time.Second,
- ResponseHeaderTimeout: 50 * time.Second,
- ExpectContinueTimeout: 50 * time.Millisecond,
},
},
}
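
The tracerouteClient rework above drops the fixed 60-second client timeout: a traceroute's worst case scales with max_ttl × per-hop timeout, so the deadline is now computed per request and carried in the request context instead. A runnable sketch of the pattern, with a placeholder socket path and URL:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// No Client.Timeout: each request brings its own deadline via context.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/tmp/sysprobe.sock") // placeholder path
			},
		},
	}

	maxTTL, perHop := uint8(30), 2*time.Second
	httpTimeout := time.Duration(maxTTL)*perHop + 10*time.Second // worst case + headroom
	ctx, cancel := context.WithTimeout(context.Background(), httpTimeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://unix/traceroute/example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
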
diff --git a/pkg/process/net/common_linux.go b/pkg/process/net/common_linux.go
index 2dc5c7db28c8d..7fee3ffdb1cb9 100644
--- a/pkg/process/net/common_linux.go
+++ b/pkg/process/net/common_linux.go
@@ -18,6 +18,7 @@ const (
pingURL = "http://unix/" + string(sysconfig.PingModule) + "/ping/"
tracerouteURL = "http://unix/" + string(sysconfig.TracerouteModule) + "/traceroute/"
connectionsURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/connections"
+ networkIDURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/network_id"
procStatsURL = "http://unix/" + string(sysconfig.ProcessModule) + "/stats"
registerURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/register"
statsURL = "http://unix/debug/stats"
diff --git a/pkg/process/net/common_unsupported.go b/pkg/process/net/common_unsupported.go
index 03a481a2de400..ebdea5968e5bb 100644
--- a/pkg/process/net/common_unsupported.go
+++ b/pkg/process/net/common_unsupported.go
@@ -40,6 +40,11 @@ func (r *RemoteSysProbeUtil) GetConnections(_ string) (*model.Connections, error
return nil, ErrNotImplemented
}
+// GetNetworkID is not supported
+func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) {
+ return "", ErrNotImplemented
+}
+
// GetStats is not supported
func (r *RemoteSysProbeUtil) GetStats() (map[string]interface{}, error) {
return nil, ErrNotImplemented
diff --git a/pkg/process/net/common_windows.go b/pkg/process/net/common_windows.go
index 4ad0d218e65f5..83d8440825e4a 100644
--- a/pkg/process/net/common_windows.go
+++ b/pkg/process/net/common_windows.go
@@ -15,6 +15,7 @@ import (
const (
connectionsURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/connections"
+	networkIDURL         = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/network_id"
registerURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/register"
languageDetectionURL = "http://localhost:3333/" + string(sysconfig.LanguageDetectionModule) + "/detect"
statsURL = "http://localhost:3333/debug/stats"
diff --git a/pkg/process/net/mocks/sys_probe_util.go b/pkg/process/net/mocks/sys_probe_util.go
index 3bf0b2c1d7270..0d0af5300fa4f 100644
--- a/pkg/process/net/mocks/sys_probe_util.go
+++ b/pkg/process/net/mocks/sys_probe_util.go
@@ -43,6 +43,34 @@ func (_m *SysProbeUtil) GetConnections(clientID string) (*process.Connections, e
return r0, r1
}
+// GetNetworkID provides a mock function with given fields:
+func (_m *SysProbeUtil) GetNetworkID() (string, error) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetNetworkID")
+ }
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func() (string, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// GetProcStats provides a mock function with given fields: pids
func (_m *SysProbeUtil) GetProcStats(pids []int32) (*process.ProcStatsWithPermByPID, error) {
ret := _m.Called(pids)
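
The regenerated mock follows the usual mockery layout, where SysProbeUtil embeds testify's mock.Mock, so tests can stub the new method directly. A sketch under that assumption — the test name and return value are illustrative:

package checks_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/DataDog/datadog-agent/pkg/process/net/mocks"
)

func TestGetNetworkIDMock(t *testing.T) {
	m := &mocks.SysProbeUtil{}
	m.On("GetNetworkID").Return("vpc-0abc123", nil) // hypothetical value

	id, err := m.GetNetworkID()
	assert.NoError(t, err)
	assert.Equal(t, "vpc-0abc123", id)
	m.AssertExpectations(t)
}
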
diff --git a/pkg/process/net/shared.go b/pkg/process/net/shared.go
index 72a6e418865c6..a0a7aa18ae327 100644
--- a/pkg/process/net/shared.go
+++ b/pkg/process/net/shared.go
@@ -13,4 +13,5 @@ type SysProbeUtil interface {
GetStats() (map[string]interface{}, error)
GetProcStats(pids []int32) (*model.ProcStatsWithPermByPID, error)
Register(clientID string) error
+ GetNetworkID() (string, error)
}
diff --git a/pkg/process/util/address.go b/pkg/process/util/address.go
index 15dde9f118d23..c98964820a071 100644
--- a/pkg/process/util/address.go
+++ b/pkg/process/util/address.go
@@ -20,41 +20,6 @@ type Address struct {
netip.Addr
}
-// WriteTo writes the address byte representation into the supplied buffer
-func (a Address) WriteTo(b []byte) int {
- if a.Is4() {
- v := a.As4()
- return copy(b, v[:])
- }
-
- v := a.As16()
- return copy(b, v[:])
-
-}
-
-// Bytes returns a byte slice representing the Address.
-// You may want to consider using `WriteTo` instead to avoid allocations
-func (a Address) Bytes() []byte {
- // Note: this implicitly converts IPv4-in-6 to IPv4
- if a.Is4() || a.Is4In6() {
- v := a.As4()
- return v[:]
- }
-
- v := a.As16()
- return v[:]
-}
-
-// Len returns the number of bytes required to represent this IP
-func (a Address) Len() int {
- return int(a.BitLen()) / 8
-}
-
-// IsZero reports whether a is its zero value
-func (a Address) IsZero() bool {
- return a.Addr == netip.Addr{}
-}
-
// AddressFromNetIP returns an Address from a provided net.IP
func AddressFromNetIP(ip net.IP) Address {
addr, _ := netipx.FromStdIP(ip)
@@ -71,7 +36,7 @@ func AddressFromString(s string) Address {
// Warning: the returned `net.IP` will share the same underlying
// memory as the given `buf` argument.
func NetIPFromAddress(addr Address, buf []byte) net.IP {
- n := addr.WriteTo(buf)
+ n := copy(buf, addr.AsSlice())
return net.IP(buf[:n])
}
@@ -115,11 +80,6 @@ func V4Address(ip uint32) Address {
}
}
-// V4AddressFromBytes creates an Address using the byte representation of an v4 IP
-func V4AddressFromBytes(buf []byte) Address {
- return Address{netip.AddrFrom4(*(*[4]byte)(buf))}
-}
-
// V6Address creates an Address using the uint128 representation of an v6 IP
func V6Address(low, high uint64) Address {
var a [16]byte
@@ -128,10 +88,5 @@ func V6Address(low, high uint64) Address {
return Address{netip.AddrFrom16(a)}
}
-// V6AddressFromBytes creates an Address using the byte representation of an v6 IP
-func V6AddressFromBytes(buf []byte) Address {
- return Address{netip.AddrFrom16(*(*[16]byte)(buf))}
-}
-
// IPBufferPool is meant to be used in conjunction with `NetIPFromAddress`
var IPBufferPool = ddsync.NewSlicePool[byte](net.IPv6len, net.IPv6len)
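
With WriteTo and Bytes removed, callers copy straight out of netip.Addr.AsSlice, as NetIPFromAddress now does. A self-contained illustration of reusing one IPv6-sized buffer for either address family:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	addr := netip.MustParseAddr("192.168.0.1")

	// copy reports how many bytes were written: 4 for IPv4, 16 for IPv6,
	// so the same buffer serves both families without allocating.
	buf := make([]byte, net.IPv6len)
	n := copy(buf, addr.AsSlice())
	ip := net.IP(buf[:n])

	fmt.Println(n, ip) // 4 192.168.0.1
}
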
diff --git a/pkg/process/util/address_test.go b/pkg/process/util/address_test.go
index bfbf57c312efc..a1de755961b49 100644
--- a/pkg/process/util/address_test.go
+++ b/pkg/process/util/address_test.go
@@ -88,22 +88,16 @@ func TestAddressUsageInMaps(t *testing.T) {
func TestAddressV4(t *testing.T) {
addr := V4Address(889192575)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("127.0.0.53"))
assert.Equal(t, "127.0.0.53", addr.String())
addr = V4Address(0)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("0.0.0.0"))
assert.Equal(t, "0.0.0.0", addr.String())
addr = V4Address(16820416)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("192.168.0.1"))
assert.Equal(t, "192.168.0.1", addr.String())
@@ -111,31 +105,23 @@ func TestAddressV4(t *testing.T) {
func TestAddressV6(t *testing.T) {
addr := V6Address(889192575, 0)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("::7f00:35:0:0"))
assert.Equal(t, "::7f00:35:0:0", addr.String())
assert.False(t, addr.IsLoopback())
addr = V6Address(0, 0)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("::"))
assert.Equal(t, "::", addr.String())
addr = V6Address(72057594037927936, 0)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("::1"))
assert.Equal(t, "::1", addr.String())
assert.True(t, addr.IsLoopback())
addr = V6Address(72059793061183488, 3087860000)
- // Should be able to recreate addr from bytes alone
- assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes()))
// Should be able to recreate addr from IP string
assert.Equal(t, addr, AddressFromString("2001:db8::2:1"))
assert.Equal(t, "2001:db8::2:1", addr.String())
@@ -176,35 +162,6 @@ func BenchmarkV6Address(b *testing.B) {
runtime.KeepAlive(addr)
}
-func BenchmarkBytes(b *testing.B) {
- var (
- addr = AddressFromString("8.8.8.8")
- bytes []byte
- )
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- // this allocates a slice descriptor that escapes to the heap
- bytes = addr.Bytes()
- }
- runtime.KeepAlive(bytes)
-}
-
-func BenchmarkWriteTo(b *testing.B) {
- addr := AddressFromString("8.8.8.8")
- bytes := make([]byte, 4)
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- // this method shouldn't allocate
- _ = addr.WriteTo(bytes)
- bytes = bytes[:0]
- }
- runtime.KeepAlive(bytes)
-}
-
func BenchmarkToLowHigh(b *testing.B) {
addr := AddressFromString("8.8.8.8")
var l, h uint64
diff --git a/pkg/sbom/collectors/host/collector.go b/pkg/sbom/collectors/host/collector.go
index b5d1a5a992a97..b92d8ad7360f2 100644
--- a/pkg/sbom/collectors/host/collector.go
+++ b/pkg/sbom/collectors/host/collector.go
@@ -35,6 +35,11 @@ func (c *Collector) Shutdown() {
c.closed = true
}
+// channelSize defines the result channel size
+// It doesn't need more than 1 because the host collector should
+// not trigger multiple scans at the same time, unlike the container-images collector.
+const channelSize = 1
+
func init() {
collectors.RegisterCollector(collectors.HostCollector, &Collector{
resChan: make(chan sbom.ScanResult, channelSize),
diff --git a/pkg/sbom/collectors/host/request.go b/pkg/sbom/collectors/host/request.go
index 8ecd73f19a20e..3b3481f7c2633 100644
--- a/pkg/sbom/collectors/host/request.go
+++ b/pkg/sbom/collectors/host/request.go
@@ -3,22 +3,14 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build trivy || (windows && wmi)
-
package host
import (
"io/fs"
- "github.com/DataDog/datadog-agent/pkg/sbom"
- "github.com/DataDog/datadog-agent/pkg/sbom/collectors"
+ "github.com/DataDog/datadog-agent/pkg/sbom/types"
)
-// channelSize defines the result channel size
-// It doesn't need more than 1 because the host collector should
-// not trigger multiple scans at the same time unlike for container-images.
-const channelSize = 1
-
// scanRequest defines a scan request. This struct should be
// hashable to be pushed in the work queue for processing.
type scanRequest struct {
@@ -27,18 +19,18 @@ type scanRequest struct {
}
// NewScanRequest creates a new scan request
-func NewScanRequest(path string, fs fs.FS) sbom.ScanRequest {
+func NewScanRequest(path string, fs fs.FS) types.ScanRequest {
return scanRequest{Path: path, FS: fs}
}
// Collector returns the collector name
func (r scanRequest) Collector() string {
- return collectors.HostCollector
+ return "host"
}
// Type returns the scan request type
-func (r scanRequest) Type(sbom.ScanOptions) string {
- return sbom.ScanFilesystemType
+func (r scanRequest) Type(types.ScanOptions) string {
+ return types.ScanFilesystemType
}
// ID returns the scan request ID
diff --git a/pkg/sbom/sbom.go b/pkg/sbom/sbom.go
index 551547d870a3a..accf0e8eb702b 100644
--- a/pkg/sbom/sbom.go
+++ b/pkg/sbom/sbom.go
@@ -11,6 +11,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/config"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ "github.com/DataDog/datadog-agent/pkg/sbom/types"
cyclonedxgo "github.com/CycloneDX/cyclonedx-go"
)
@@ -26,19 +27,6 @@ type Report interface {
ID() string
}
-// ScanOptions defines the scan options
-type ScanOptions struct {
- Analyzers []string
- CheckDiskUsage bool
- MinAvailableDisk uint64
- Timeout time.Duration
- WaitAfter time.Duration
- Fast bool
- CollectFiles bool
- UseMount bool
- OverlayFsScan bool
-}
-
// ScanOptionsFromConfig loads the scanning options from the configuration
func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts ScanOptions) {
if containers {
@@ -59,11 +47,10 @@ func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts Scan
}
// ScanRequest defines the scan request interface
-type ScanRequest interface {
- Collector() string
- Type(ScanOptions) string
- ID() string
-}
+type ScanRequest = types.ScanRequest
+
+// ScanOptions defines the scan options
+type ScanOptions = types.ScanOptions
// ScanResult defines the scan result
type ScanResult struct {
diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go
index bf4e25676d723..19d25a01c808a 100644
--- a/pkg/sbom/scanner/scanner.go
+++ b/pkg/sbom/scanner/scanner.go
@@ -224,6 +224,11 @@ func (s *Scanner) startScanRequestHandler(ctx context.Context) {
}()
}
+// GetCollector returns the collector with the specified name
+func (s *Scanner) GetCollector(collector string) collectors.Collector {
+ return s.collectors[collector]
+}
+
func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) {
request, ok := r.(sbom.ScanRequest)
if !ok {
@@ -232,8 +237,8 @@ func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) {
return
}
- collector, ok := s.collectors[request.Collector()]
- if !ok {
+ collector := s.GetCollector(request.Collector())
+ if collector == nil {
_ = log.Errorf("invalid collector '%s'", request.Collector())
s.scanQueue.Forget(request)
return
@@ -276,7 +281,7 @@ func (s *Scanner) processScan(ctx context.Context, request sbom.ScanRequest, img
if result == nil {
scanContext, cancel := context.WithTimeout(ctx, timeout(collector))
defer cancel()
- result = s.performScan(scanContext, request, collector)
+ result = s.PerformScan(scanContext, request, collector)
errorType = "scan"
}
sendResult(ctx, request.ID(), result, collector)
@@ -299,7 +304,8 @@ func (s *Scanner) checkDiskSpace(imgMeta *workloadmeta.ContainerImageMetadata, c
return result
}
-func (s *Scanner) performScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult {
+// PerformScan processes a scan request with the selected collector and returns the SBOM
+func (s *Scanner) PerformScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult {
createdAt := time.Now()
s.cacheMutex.Lock()
diff --git a/pkg/sbom/types/types.go b/pkg/sbom/types/types.go
new file mode 100644
index 0000000000000..8b5989cd5ed23
--- /dev/null
+++ b/pkg/sbom/types/types.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package types holds sbom related types
+package types
+
+import "time"
+
+// ScanRequest defines the scan request interface
+type ScanRequest interface {
+ Collector() string
+ Type(ScanOptions) string
+ ID() string
+}
+
+// ScanOptions defines the scan options
+type ScanOptions struct {
+ Analyzers []string
+ CheckDiskUsage bool
+ MinAvailableDisk uint64
+ Timeout time.Duration
+ WaitAfter time.Duration
+ Fast bool
+ CollectFiles bool
+ UseMount bool
+ OverlayFsScan bool
+}
+
+const (
+ ScanFilesystemType = "filesystem" // ScanFilesystemType defines the type for file-system scan
+ ScanDaemonType = "daemon" // ScanDaemonType defines the type for daemon scan
+)
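
Moving ScanRequest and ScanOptions into the leaf package pkg/sbom/types while re-exporting them from pkg/sbom as type aliases lets low-level packages depend on the types without importing the heavier sbom package, and keeps every existing sbom.ScanOptions caller compiling unchanged. Aliases declared with = denote the same type, not a copy — a quick standalone demonstration:

package main

import "fmt"

type options struct{ Fast bool }

// optionsAlias is an alias (note the =): both names denote the same type,
// so values pass freely between APIs declared against either name.
type optionsAlias = options

func takesAlias(o optionsAlias) bool { return o.Fast }

func main() {
	fmt.Println(takesAlias(options{Fast: true})) // true — no conversion needed
}
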
diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go
index c582e4a777763..60d71694f9188 100644
--- a/pkg/security/agent/agent.go
+++ b/pkg/security/agent/agent.go
@@ -37,7 +37,6 @@ type RuntimeSecurityAgent struct {
connected *atomic.Bool
eventReceived *atomic.Uint64
activityDumpReceived *atomic.Uint64
- telemetry *telemetry
profContainersTelemetry *profContainersTelemetry
endpoints *config.Endpoints
cancel context.CancelFunc
@@ -48,8 +47,7 @@ type RuntimeSecurityAgent struct {
// RSAOptions represents the runtime security agent options
type RSAOptions struct {
- LogProfiledWorkloads bool
- IgnoreDDAgentContainers bool
+ LogProfiledWorkloads bool
}
// Start the runtime security agent
@@ -70,11 +68,6 @@ func (rsa *RuntimeSecurityAgent) Start(reporter common.RawReporter, endpoints *c
go rsa.startActivityDumpStorageTelemetry(ctx)
}
- if rsa.telemetry != nil {
- // Send Runtime Security Agent telemetry
- go rsa.telemetry.run(ctx)
- }
-
if rsa.profContainersTelemetry != nil {
// Send Profiled Containers telemetry
go rsa.profContainersTelemetry.run(ctx)
@@ -190,7 +183,7 @@ func (rsa *RuntimeSecurityAgent) DispatchActivityDump(msg *api.ActivityDumpStrea
log.Errorf("%v", err)
return
}
- if rsa.telemetry != nil {
+ if rsa.profContainersTelemetry != nil {
// register for telemetry for this container
imageName, imageTag := dump.GetImageNameTag()
rsa.profContainersTelemetry.registerProfiledContainer(imageName, imageTag)
diff --git a/pkg/security/agent/agent_nix.go b/pkg/security/agent/agent_nix.go
index f7c537a7d7525..bc444d6f049fe 100644
--- a/pkg/security/agent/agent_nix.go
+++ b/pkg/security/agent/agent_nix.go
@@ -24,12 +24,6 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin
return nil, err
}
- // on windows do no telemetry
- telemetry, err := newTelemetry(statsdClient, wmeta, opts.IgnoreDDAgentContainers)
- if err != nil {
- return nil, errors.New("failed to initialize the telemetry reporter")
- }
-
profContainersTelemetry, err := newProfContainersTelemetry(statsdClient, wmeta, opts.LogProfiledWorkloads)
if err != nil {
return nil, errors.New("failed to initialize the profiled containers telemetry reporter")
@@ -44,7 +38,6 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin
return &RuntimeSecurityAgent{
client: client,
hostname: hostname,
- telemetry: telemetry,
profContainersTelemetry: profContainersTelemetry,
storage: storage,
running: atomic.NewBool(false),
diff --git a/pkg/security/agent/agent_windows.go b/pkg/security/agent/agent_windows.go
index 54bd6862155e2..3b1cad54f3e10 100644
--- a/pkg/security/agent/agent_windows.go
+++ b/pkg/security/agent/agent_windows.go
@@ -24,7 +24,6 @@ func NewRuntimeSecurityAgent(_ statsd.ClientInterface, hostname string, _ RSAOpt
return &RuntimeSecurityAgent{
client: client,
hostname: hostname,
- telemetry: nil,
storage: nil,
running: atomic.NewBool(false),
connected: atomic.NewBool(false),
diff --git a/pkg/security/agent/status_provider_test.go b/pkg/security/agent/status_provider_test.go
index 4c259193e5f67..af26830e9f3dd 100644
--- a/pkg/security/agent/status_provider_test.go
+++ b/pkg/security/agent/status_provider_test.go
@@ -18,7 +18,6 @@ func TestStatus(t *testing.T) {
agent: &RuntimeSecurityAgent{
client: nil,
hostname: "test",
- telemetry: nil,
storage: nil,
running: atomic.NewBool(false),
connected: atomic.NewBool(false),
diff --git a/pkg/security/agent/telemetry_others.go b/pkg/security/agent/telemetry_others.go
index d36f3e1aba54b..10647951c734b 100644
--- a/pkg/security/agent/telemetry_others.go
+++ b/pkg/security/agent/telemetry_others.go
@@ -10,10 +10,6 @@ package agent
import "context"
-type telemetry struct{}
-
-func (t *telemetry) run(_ context.Context) {}
-
type profContainersTelemetry struct{}
func (t *profContainersTelemetry) registerProfiledContainer(_, _ string) {}
diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go
index 630c3a952ce10..95a261f32963d 100644
--- a/pkg/security/config/config.go
+++ b/pkg/security/config/config.go
@@ -233,10 +233,26 @@ type RuntimeSecurityConfig struct {
EBPFLessSocket string
// Enforcement capabilities
- EnforcementEnabled bool
+ // EnforcementEnabled defines if the enforcement capability should be enabled
+ EnforcementEnabled bool
+ // EnforcementRawSyscallEnabled defines if the enforcement should be performed using the sys_enter tracepoint
EnforcementRawSyscallEnabled bool
EnforcementBinaryExcluded []string
EnforcementRuleSourceAllowed []string
+ // EnforcementDisarmerContainerEnabled defines if an enforcement rule should be disarmed when hitting too many different containers
+ EnforcementDisarmerContainerEnabled bool
+ // EnforcementDisarmerContainerMaxAllowed defines the maximum number of different containers that can trigger an enforcement rule
+ // within a period before the enforcement is disarmed for this rule
+ EnforcementDisarmerContainerMaxAllowed int
+ // EnforcementDisarmerContainerPeriod defines the period during which EnforcementDisarmerContainerMaxAllowed is checked
+ EnforcementDisarmerContainerPeriod time.Duration
+ // EnforcementDisarmerExecutableEnabled defines if an enforcement rule should be disarmed when hitting too many different executables
+ EnforcementDisarmerExecutableEnabled bool
+ // EnforcementDisarmerExecutableMaxAllowed defines the maximum number of different executables that can trigger an enforcement rule
+ // within a period before the enforcement is disarmed for this rule
+ EnforcementDisarmerExecutableMaxAllowed int
+ // EnforcementDisarmerExecutablePeriod defines the period during which EnforcementDisarmerExecutableMaxAllowed is checked
+ EnforcementDisarmerExecutablePeriod time.Duration
// WindowsFilenameCacheSize is the max number of filenames to cache
WindowsFilenameCacheSize int
@@ -416,10 +432,16 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) {
AnomalyDetectionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"),
// enforcement
- EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"),
- EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"),
- EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"),
- EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"),
+ EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"),
+ EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"),
+ EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"),
+ EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"),
+ EnforcementDisarmerContainerEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"),
+ EnforcementDisarmerContainerMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"),
+ EnforcementDisarmerContainerPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"),
+ EnforcementDisarmerExecutableEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"),
+ EnforcementDisarmerExecutableMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"),
+ EnforcementDisarmerExecutablePeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"),
// User Sessions
UserSessionsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"),
@@ -489,6 +511,16 @@ func (c *RuntimeSecurityConfig) sanitize() error {
return fmt.Errorf("invalid IPv4 address: got %v", coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4"))
}
+ if c.EnforcementDisarmerContainerEnabled && c.EnforcementDisarmerContainerMaxAllowed <= 0 {
+ return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.container.max_allowed: %d", c.EnforcementDisarmerContainerMaxAllowed)
+ }
+
+ if c.EnforcementDisarmerExecutableEnabled && c.EnforcementDisarmerExecutableMaxAllowed <= 0 {
+ return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.executable.max_allowed: %d", c.EnforcementDisarmerExecutableMaxAllowed)
+ }
+
+ c.sanitizePlatform()
+
return c.sanitizeRuntimeSecurityConfigActivityDump()
}
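
The new disarmer fields are read from system-probe configuration keys under runtime_security_config.enforcement.disarmer, and sanitize rejects a non-positive max_allowed whenever the corresponding disarmer is enabled. A hedged sketch of what enabling both disarmers could look like in system-probe.yaml — the values shown are illustrative, not defaults from this patch:

runtime_security_config:
  enforcement:
    enabled: true
    disarmer:
      container:
        enabled: true
        max_allowed: 5   # distinct containers tolerated per period
        period: 1m
      executable:
        enabled: true
        max_allowed: 5   # distinct executables tolerated per period
        period: 1m
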
diff --git a/pkg/security/config/config_linux.go b/pkg/security/config/config_linux.go
new file mode 100644
index 0000000000000..02062f9def762
--- /dev/null
+++ b/pkg/security/config/config_linux.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package config holds config related files
+package config
+
+func (c *RuntimeSecurityConfig) sanitizePlatform() {
+ // Force the disable of features unavailable on EBPFLess
+ if c.EBPFLessEnabled {
+ c.ActivityDumpEnabled = false
+ c.SecurityProfileEnabled = false
+ }
+}
diff --git a/pkg/security/config/config_others.go b/pkg/security/config/config_others.go
new file mode 100644
index 0000000000000..91da3e31277fb
--- /dev/null
+++ b/pkg/security/config/config_others.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !linux
+
+// Package config holds config related files
+package config
+
+func (c *RuntimeSecurityConfig) sanitizePlatform() {
+ // Force the disable of features unavailable on Windows
+ c.ActivityDumpEnabled = false
+ c.SecurityProfileEnabled = false
+}
diff --git a/pkg/security/ebpf/c/include/helpers/approvers.h b/pkg/security/ebpf/c/include/helpers/approvers.h
index dedc9b36772dd..02c10694c7a19 100644
--- a/pkg/security/ebpf/c/include/helpers/approvers.h
+++ b/pkg/security/ebpf/c/include/helpers/approvers.h
@@ -49,10 +49,26 @@ int __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *sysca
return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHOWN);
}
-int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t *syscall) {
+int __attribute__((always_inline)) lookup_u32_flags(void *map, u32 *flags) {
u32 key = 0;
- u32 *flags = bpf_map_lookup_elem(&mmap_flags_approvers, &key);
- if (flags != NULL && (syscall->mmap.flags & *flags) > 0) {
+ struct u32_flags_filter_t *filter = bpf_map_lookup_elem(map, &key);
+ if (filter == NULL || !filter->is_set) {
+ return 0;
+ }
+ *flags = filter->flags;
+
+ return 1;
+}
+
+int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t *syscall) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&mmap_flags_approvers, &flags);
+ if (!exists) {
+ return 0;
+ }
+
+ if ((syscall->mmap.flags & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
}
@@ -60,12 +76,13 @@ int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t
}
int __attribute__((always_inline)) approve_mmap_by_protection(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags_ptr = bpf_map_lookup_elem(&mmap_protection_approvers, &key);
- if (flags_ptr == NULL) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&mmap_protection_approvers, &flags);
+ if (!exists) {
return 0;
}
- u32 flags = *flags_ptr;
+
if ((flags == 0 && syscall->mmap.protection == 0) || (syscall->mmap.protection & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
@@ -104,9 +121,14 @@ int __attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *sysca
}
int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags = bpf_map_lookup_elem(&mprotect_vm_protection_approvers, &key);
- if (flags != NULL && (syscall->mprotect.vm_protection & *flags) > 0) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&mprotect_vm_protection_approvers, &flags);
+ if (!exists) {
+ return 0;
+ }
+
+ if ((syscall->mprotect.vm_protection & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
}
@@ -114,9 +136,14 @@ int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct sysc
}
int __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags = bpf_map_lookup_elem(&mprotect_req_protection_approvers, &key);
- if (flags != NULL && (syscall->mprotect.req_protection & *flags) > 0) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&mprotect_req_protection_approvers, &flags);
+ if (!exists) {
+ return 0;
+ }
+
+ if ((syscall->mprotect.req_protection & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
}
@@ -133,13 +160,13 @@ int __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *sy
}
int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags_ptr = bpf_map_lookup_elem(&open_flags_approvers, &key);
- if (flags_ptr == NULL) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&open_flags_approvers, &flags);
+ if (!exists) {
return 0;
}
- u32 flags = *flags_ptr;
if ((flags == 0 && syscall->open.flags == 0) || ((syscall->open.flags & flags) > 0)) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
@@ -171,9 +198,14 @@ int __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *sysca
}
int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags = bpf_map_lookup_elem(&splice_entry_flags_approvers, &key);
- if (flags != NULL && (syscall->splice.pipe_entry_flag & *flags) > 0) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&splice_entry_flags_approvers, &flags);
+ if (!exists) {
+ return 0;
+ }
+
+ if ((syscall->splice.pipe_entry_flag & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
}
@@ -181,9 +213,14 @@ int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_
}
int __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) {
- u32 key = 0;
- u32 *flags = bpf_map_lookup_elem(&splice_exit_flags_approvers, &key);
- if (flags != NULL && (syscall->splice.pipe_exit_flag & *flags) > 0) {
+ u32 flags = 0;
+
+ int exists = lookup_u32_flags(&splice_exit_flags_approvers, &flags);
+ if (!exists) {
+ return 0;
+ }
+
+ if ((syscall->splice.pipe_exit_flag & flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
return 1;
}
@@ -216,16 +253,18 @@ int __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *sysca
}
int __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) {
- int pass_to_userspace = 0;
u32 key = 0;
+ struct u64_flags_filter_t *filter = bpf_map_lookup_elem(&bpf_cmd_approvers, &key);
+ if (filter == NULL || !filter->is_set) {
+ return 0;
+ }
- u64 *cmd_bitmask = bpf_map_lookup_elem(&bpf_cmd_approvers, &key);
- if (cmd_bitmask != NULL && ((1 << syscall->bpf.cmd) & *cmd_bitmask) > 0) {
+ if (((1 << syscall->bpf.cmd) & filter->flags) > 0) {
monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE);
- pass_to_userspace = 1;
+ return 1;
}
- return pass_to_userspace;
+ return 0;
}
#endif
diff --git a/pkg/security/ebpf/c/include/hooks/network/router.h b/pkg/security/ebpf/c/include/hooks/network/router.h
index 11f42d748c195..81978692e5113 100644
--- a/pkg/security/ebpf/c/include/hooks/network/router.h
+++ b/pkg/security/ebpf/c/include/hooks/network/router.h
@@ -44,13 +44,17 @@ __attribute__((always_inline)) int route_pkt(struct __sk_buff *skb, struct packe
// TODO: l3 / l4 firewall
// route DNS requests
- if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) {
- tail_call_to_classifier(skb, DNS_REQUEST);
+ if (is_event_enabled(EVENT_DNS)) {
+ if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) {
+ tail_call_to_classifier(skb, DNS_REQUEST);
+ }
}
// route IMDS requests
- if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) {
- tail_call_to_classifier(skb, IMDS_REQUEST);
+ if (is_event_enabled(EVENT_IMDS)) {
+ if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) {
+ tail_call_to_classifier(skb, IMDS_REQUEST);
+ }
}
return ACT_OK;
diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h
index d472b54d64627..91cca71ffe8ec 100644
--- a/pkg/security/ebpf/c/include/maps.h
+++ b/pkg/security/ebpf/c/include/maps.h
@@ -14,15 +14,15 @@ BPF_ARRAY_MAP(dr_erpc_buffer, char[DR_ERPC_BUFFER_LENGTH * 2], 1)
BPF_ARRAY_MAP(inode_disc_revisions, u32, REVISION_ARRAY_SIZE)
BPF_ARRAY_MAP(discarders_revision, u32, 1)
BPF_ARRAY_MAP(filter_policy, struct policy_t, EVENT_MAX)
-BPF_ARRAY_MAP(mmap_flags_approvers, u32, 1)
-BPF_ARRAY_MAP(mmap_protection_approvers, u32, 1)
-BPF_ARRAY_MAP(mprotect_vm_protection_approvers, u32, 1)
-BPF_ARRAY_MAP(mprotect_req_protection_approvers, u32, 1)
-BPF_ARRAY_MAP(open_flags_approvers, u32, 1)
+BPF_ARRAY_MAP(mmap_flags_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(mmap_protection_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(mprotect_vm_protection_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(mprotect_req_protection_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(open_flags_approvers, struct u32_flags_filter_t, 1)
BPF_ARRAY_MAP(selinux_enforce_status, u16, 2)
-BPF_ARRAY_MAP(splice_entry_flags_approvers, u32, 1)
-BPF_ARRAY_MAP(splice_exit_flags_approvers, u32, 1)
-BPF_ARRAY_MAP(bpf_cmd_approvers, u64, 1)
+BPF_ARRAY_MAP(splice_entry_flags_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(splice_exit_flags_approvers, struct u32_flags_filter_t, 1)
+BPF_ARRAY_MAP(bpf_cmd_approvers, struct u64_flags_filter_t, 1)
BPF_ARRAY_MAP(syscalls_stats_enabled, u32, 1)
BPF_ARRAY_MAP(syscall_ctx_gen_id, u32, 1)
BPF_ARRAY_MAP(syscall_ctx, char[MAX_SYSCALL_CTX_SIZE], MAX_SYSCALL_CTX_ENTRIES)
diff --git a/pkg/security/ebpf/c/include/structs/filter.h b/pkg/security/ebpf/c/include/structs/filter.h
index 724331aa84841..9d876d0a89ef9 100644
--- a/pkg/security/ebpf/c/include/structs/filter.h
+++ b/pkg/security/ebpf/c/include/structs/filter.h
@@ -24,6 +24,16 @@ struct basename_filter_t {
u64 event_mask;
};
+struct u32_flags_filter_t {
+ u32 flags;
+ u8 is_set;
+};
+
+struct u64_flags_filter_t {
+ u64 flags;
+ u8 is_set;
+};
+
// Discarders
struct discarder_stats_t {
diff --git a/pkg/security/ebpf/map.go b/pkg/security/ebpf/map.go
index 27973d376b00a..99e39dff3dbf5 100644
--- a/pkg/security/ebpf/map.go
+++ b/pkg/security/ebpf/map.go
@@ -86,6 +86,46 @@ func NewStringMapItem(str string, size int) *StringMapItem {
return &StringMapItem{str: str, size: size}
}
+// Uint32FlagsZeroMapItem is the zero value used to reset the map entry
+var Uint32FlagsZeroMapItem = make([]byte, 8)
+
+// Uint32FlagsMapItem describes a flags table key or value
+type Uint32FlagsMapItem uint32
+
+// MarshalBinary returns the binary representation of a Uint32FlagsMapItem
+func (i *Uint32FlagsMapItem) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 8)
+ binary.NativeEndian.PutUint32(b, uint32(*i))
+ b[4] = 1
+ return b, nil
+}
+
+// NewUint32FlagsMapItem returns a new Uint32FlagsMapItem
+func NewUint32FlagsMapItem(i uint32) *Uint32FlagsMapItem {
+ item := Uint32FlagsMapItem(i)
+ return &item
+}
+
+// Uint64FlagsZeroMapItem is the zero value used to reset the map entry
+var Uint64FlagsZeroMapItem = make([]byte, 16)
+
+// Uint64FlagsMapItem describes a flags table key or value
+type Uint64FlagsMapItem uint64
+
+// MarshalBinary returns the binary representation of a Uint64FlagsMapItem
+func (i *Uint64FlagsMapItem) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 16)
+ binary.NativeEndian.PutUint64(b, uint64(*i))
+ b[8] = 1
+ return b, nil
+}
+
+// NewUint64FlagsMapItem returns a new Uint64FlagsMapItem
+func NewUint64FlagsMapItem(i uint64) *Uint64FlagsMapItem {
+ item := Uint64FlagsMapItem(i)
+ return &item
+}
+
// Zero table items
var (
ZeroUint8MapItem = BytesMapItem([]byte{0})
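
On the userspace side, the new Uint32FlagsMapItem/Uint64FlagsMapItem mirror u32_flags_filter_t/u64_flags_filter_t: C alignment pads {u32 flags; u8 is_set} to 8 bytes (and the u64 variant to 16), which is why MarshalBinary writes is_set at offset 4 (resp. 8). The is_set byte is what lets the eBPF side distinguish "approver configured with zero flags" from "no approver at all". The 32-bit encoding in isolation:

package main

import (
	"encoding/binary"
	"fmt"
)

// marshalU32Filter lays out { u32 flags; u8 is_set } with C alignment:
// 8 bytes total — flags at offset 0, is_set at offset 4, then padding.
func marshalU32Filter(flags uint32) []byte {
	b := make([]byte, 8)
	binary.NativeEndian.PutUint32(b, flags)
	b[4] = 1 // is_set
	return b
}

func main() {
	fmt.Printf("% x\n", marshalU32Filter(0x42)) // 42 00 00 00 01 00 00 00 on little-endian
}
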
diff --git a/pkg/security/generators/schemas/policy/main.go b/pkg/security/generators/schemas/policy/main.go
new file mode 100644
index 0000000000000..0dc6181eabead
--- /dev/null
+++ b/pkg/security/generators/schemas/policy/main.go
@@ -0,0 +1,70 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:generate go run github.com/DataDog/datadog-agent/pkg/security/generators/schemas/policy -output ../../../tests/schemas/policy.schema.json
+
+// Package main holds main related files
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "os"
+ "reflect"
+ "time"
+
+ "github.com/invopop/jsonschema"
+
+ "github.com/DataDog/datadog-agent/pkg/security/secl/rules"
+)
+
+func main() {
+ var output string
+ flag.StringVar(&output, "output", "", "output file")
+ flag.Parse()
+
+ if output == "" {
+ panic("an output file argument is required")
+ }
+
+ reflector := jsonschema.Reflector{
+ ExpandedStruct: true,
+ Mapper: func(t reflect.Type) *jsonschema.Schema {
+ switch t {
+ case reflect.TypeOf(time.Duration(0)):
+ return &jsonschema.Schema{
+ OneOf: []*jsonschema.Schema{
+ {
+ Type: "string",
+ Format: "duration",
+ Description: "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)",
+ },
+ {
+ Type: "integer",
+ Description: "Duration in nanoseconds",
+ },
+ },
+ }
+ }
+ return nil
+ },
+ }
+
+ if err := reflector.AddGoComments("github.com/DataDog/datadog-agent/pkg/security/secl/rules/model.go", "../../../secl/rules"); err != nil {
+ panic(err)
+ }
+
+ schema := reflector.Reflect(&rules.PolicyDef{})
+ schema.ID = "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules"
+
+ data, err := json.MarshalIndent(schema, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ if err := os.WriteFile(output, data, 0644); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/security/metrics/metrics.go b/pkg/security/metrics/metrics.go
index 9213c0c4ff739..481f0d16bfaea 100644
--- a/pkg/security/metrics/metrics.go
+++ b/pkg/security/metrics/metrics.go
@@ -331,6 +331,21 @@ var (
// Tags: -
MetricRulesStatus = newRuntimeMetric(".rules_status")
+ // Enforcement metrics
+
+ // MetricEnforcementKillActionPerformed is the name of the metric used to report that a kill action was performed
+ // Tags: rule_id
+ MetricEnforcementKillActionPerformed = newRuntimeMetric(".enforcement.kill_action_performed")
+ // MetricEnforcementProcessKilled is the name of the metric used to report the number of processes killed
+ // Tags: rule_id
+ MetricEnforcementProcessKilled = newRuntimeMetric(".enforcement.process_killed")
+ // MetricEnforcementRuleDisarmed is the name of the metric used to report that a rule was disarmed
+ // Tags: rule_id, disarmer_type ('executable', 'container')
+ MetricEnforcementRuleDisarmed = newRuntimeMetric(".enforcement.rule_disarmed")
+ // MetricEnforcementRuleRearmed is the name of the metric used to report that a rule was rearmed
+ // Tags: rule_id
+ MetricEnforcementRuleRearmed = newRuntimeMetric(".enforcement.rule_rearmed")
+
// Others
// MetricSelfTest is the name of the metric used to report that a self test was performed
diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go
index a5091b44ace95..4f2c298989e75 100644
--- a/pkg/security/module/cws.go
+++ b/pkg/security/module/cws.go
@@ -15,6 +15,7 @@ import (
"github.com/DataDog/datadog-go/v5/statsd"
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/eventmonitor"
"github.com/DataDog/datadog-agent/pkg/security/config"
"github.com/DataDog/datadog-agent/pkg/security/events"
@@ -28,6 +29,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/security/secl/rules"
"github.com/DataDog/datadog-agent/pkg/security/seclog"
"github.com/DataDog/datadog-agent/pkg/security/serializers"
+ "github.com/DataDog/datadog-agent/pkg/security/telemetry"
)
// CWSConsumer represents the system-probe module for the runtime security agent
@@ -49,17 +51,19 @@ type CWSConsumer struct {
ruleEngine *rulesmodule.RuleEngine
selfTester *selftests.SelfTester
reloader ReloaderInterface
+ crtelemetry *telemetry.ContainersRunningTelemetry
}
// NewCWSConsumer initializes the module with options
-func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, opts Opts) (*CWSConsumer, error) {
- ctx, cancelFnc := context.WithCancel(context.Background())
+func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, wmeta workloadmeta.Component, opts Opts) (*CWSConsumer, error) {
+ crtelemetry, err := telemetry.NewContainersRunningTelemetry(cfg, evm.StatsdClient, wmeta)
+ if err != nil {
+ return nil, err
+ }
- var (
- selfTester *selftests.SelfTester
- err error
- )
+ ctx, cancelFnc := context.WithCancel(context.Background())
+ var selfTester *selftests.SelfTester
if cfg.SelfTestEnabled {
selfTester, err = selftests.NewSelfTester(cfg, evm.Probe)
if err != nil {
@@ -82,6 +86,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC
grpcServer: NewGRPCServer(family, address),
selfTester: selfTester,
reloader: NewReloader(),
+ crtelemetry: crtelemetry,
}
// set sender
@@ -151,6 +156,11 @@ func (c *CWSConsumer) Start() error {
c.wg.Add(1)
go c.statsSender()
+ if c.crtelemetry != nil {
+ // Send containers running telemetry
+ go c.crtelemetry.Run(c.ctx)
+ }
+
seclog.Infof("runtime security started")
// we can now wait for self test events
diff --git a/pkg/security/module/opts.go b/pkg/security/module/opts.go
index fd642eb438652..984f0c3872142 100644
--- a/pkg/security/module/opts.go
+++ b/pkg/security/module/opts.go
@@ -6,7 +6,9 @@
// Package module holds module related files
package module
-import "github.com/DataDog/datadog-agent/pkg/security/events"
+import (
+ "github.com/DataDog/datadog-agent/pkg/security/events"
+)
// Opts define module options
type Opts struct {
diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json
index 6bd99c787014d..7bedf2df1b260 100644
--- a/pkg/security/probe/constantfetch/btfhub/constants.json
+++ b/pkg/security/probe/constantfetch/btfhub/constants.json
@@ -11855,6 +11855,13 @@
"uname_release": "4.14.350-266.564.amzn2.aarch64",
"cindex": 3
},
+ {
+ "distrib": "amzn",
+ "version": "2",
+ "arch": "arm64",
+ "uname_release": "4.14.352-267.564.amzn2.aarch64",
+ "cindex": 3
+ },
{
"distrib": "amzn",
"version": "2",
@@ -12653,6 +12660,13 @@
"uname_release": "4.14.350-266.564.amzn2.x86_64",
"cindex": 8
},
+ {
+ "distrib": "amzn",
+ "version": "2",
+ "arch": "x86_64",
+ "uname_release": "4.14.352-267.564.amzn2.x86_64",
+ "cindex": 8
+ },
{
"distrib": "amzn",
"version": "2",
@@ -20801,6 +20815,13 @@
"uname_release": "4.1.12-124.88.3.el7uek.x86_64",
"cindex": 94
},
+ {
+ "distrib": "ol",
+ "version": "7",
+ "arch": "x86_64",
+ "uname_release": "4.1.12-124.89.4.el7uek.x86_64",
+ "cindex": 94
+ },
{
"distrib": "ol",
"version": "7",
@@ -23657,6 +23678,13 @@
"uname_release": "4.14.35-2047.540.4.el7uek.x86_64",
"cindex": 96
},
+ {
+ "distrib": "ol",
+ "version": "7",
+ "arch": "x86_64",
+ "uname_release": "4.14.35-2047.541.1.el7uek.x86_64",
+ "cindex": 96
+ },
{
"distrib": "ol",
"version": "7",
diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go
index 8adc92a9737a5..30b4333d1995b 100644
--- a/pkg/security/probe/discarders_linux.go
+++ b/pkg/security/probe/discarders_linux.go
@@ -399,13 +399,14 @@ func (id *inodeDiscarders) discardParentInode(req *erpc.Request, rs *rules.RuleS
parentKey := pathKey
for i := 0; i < discarderDepth; i++ {
- parentKey, err = id.dentryResolver.GetParent(parentKey)
+ key, err := id.dentryResolver.GetParent(parentKey)
if err != nil || dentry.IsFakeInode(pathKey.Inode) {
if i == 0 {
return false, 0, 0, err
}
break
}
+ parentKey = key
}
	// do not insert the same discarder multiple times
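
The discardParentInode fix above keeps GetParent errors from clobbering parentKey: the result goes into a temporary and is promoted only on success, so after the loop parentKey still holds the deepest ancestor that actually resolved. The shape in isolation:

package main

import (
	"errors"
	"fmt"
)

func getParent(k int) (int, error) {
	if k >= 2 {
		return 0, errors.New("no parent") // simulate a resolution failure
	}
	return k + 1, nil
}

func main() {
	cur := 0
	for i := 0; i < 5; i++ {
		next, err := getParent(cur)
		if err != nil {
			break // cur keeps the last successfully resolved key
		}
		cur = next
	}
	fmt.Println(cur) // 2: the failure did not overwrite it
}
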
diff --git a/pkg/security/probe/kfilters/approvers.go b/pkg/security/probe/kfilters/approvers.go
index 4da7b1cfb93a1..f902b9ccd0e83 100644
--- a/pkg/security/probe/kfilters/approvers.go
+++ b/pkg/security/probe/kfilters/approvers.go
@@ -44,7 +44,7 @@ func newBasenameKFilters(tableName string, eventType model.EventType, basenames
return approvers, nil
}
-func intValues[I int32 | int64](fvs rules.FilterValues) []I {
+func uintValues[I uint32 | uint64](fvs rules.FilterValues) []I {
var values []I
for _, v := range fvs {
values = append(values, I(v.Value.(int)))
@@ -52,35 +52,44 @@ func intValues[I int32 | int64](fvs rules.FilterValues) []I {
return values
}
-func newKFilterWithFlags[I int32 | int64](tableName string, flags ...I) (activeKFilter, error) {
- var flagsItem I
-
+func newKFilterWithUInt32Flags(tableName string, flags ...uint32) (activeKFilter, error) {
+ var bitmask uint32
for _, flag := range flags {
- flagsItem |= flag
+ bitmask |= flag
}
- if flagsItem != 0 {
- return &arrayEntry{
- tableName: tableName,
- index: uint32(0),
- value: flagsItem,
- zeroValue: I(0),
- }, nil
+ return &arrayEntry{
+ tableName: tableName,
+ index: uint32(0),
+ value: ebpf.NewUint32FlagsMapItem(bitmask),
+ zeroValue: ebpf.Uint32FlagsZeroMapItem,
+ }, nil
+}
+
+func newKFilterWithUInt64Flags(tableName string, flags ...uint64) (activeKFilter, error) {
+ var bitmask uint64
+ for _, flag := range flags {
+ bitmask |= flag
}
- return nil, nil
+ return &arrayEntry{
+ tableName: tableName,
+ index: uint32(0),
+ value: ebpf.NewUint64FlagsMapItem(bitmask),
+ zeroValue: ebpf.Uint64FlagsZeroMapItem,
+ }, nil
}
-func getFlagsKFilters(tableName string, flags ...int32) (activeKFilter, error) {
- return newKFilterWithFlags(tableName, flags...)
+func getFlagsKFilter(tableName string, flags ...uint32) (activeKFilter, error) {
+ return newKFilterWithUInt32Flags(tableName, flags...)
}
-func getEnumsKFilters(tableName string, enums ...int64) (activeKFilter, error) {
- var flags []int64
+func getEnumsKFilters(tableName string, enums ...uint64) (activeKFilter, error) {
+ var flags []uint64
for _, enum := range enums {
		flags = append(flags, 1<<enum)
	}
	return newKFilterWithUInt64Flags(tableName, flags...)
}
diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go
--- a/pkg/security/probe/process_killer.go
+++ b/pkg/security/probe/process_killer.go
+	p.perRuleStatsLock.Lock()
+	for ruleID, stats := range p.perRuleStats {
+		ruleIDTag := []string{
+			"rule_id:" + string(ruleID),
+		}
+
+		if stats.actionPerformed > 0 {
+ _ = statsd.Count(metrics.MetricEnforcementKillActionPerformed, stats.actionPerformed, ruleIDTag, 1)
+ stats.actionPerformed = 0
+ }
+
+ if stats.processesKilled > 0 {
+ _ = statsd.Count(metrics.MetricEnforcementProcessKilled, stats.processesKilled, ruleIDTag, 1)
+ stats.processesKilled = 0
+ }
+ }
+ p.perRuleStatsLock.Unlock()
+
+ p.ruleDisarmersLock.Lock()
+ for ruleID, disarmer := range p.ruleDisarmers {
+ ruleIDTag := []string{
+ "rule_id:" + string(ruleID),
+ }
+
+ disarmer.Lock()
+ for disarmerType, count := range disarmer.disarmedCount {
+ if count > 0 {
+ tags := append([]string{"disarmer_type:" + string(disarmerType)}, ruleIDTag...)
+ _ = statsd.Count(metrics.MetricEnforcementRuleDisarmed, count, tags, 1)
+ disarmer.disarmedCount[disarmerType] = 0
+ }
+ }
+ if disarmer.rearmedCount > 0 {
+ _ = statsd.Count(metrics.MetricEnforcementRuleRearmed, disarmer.rearmedCount, ruleIDTag, 1)
+ disarmer.rearmedCount = 0
+ }
+ disarmer.Unlock()
+ }
+ p.ruleDisarmersLock.Unlock()
+}
+
+// Start starts the goroutine responsible for flushing the disarmer caches
+func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) {
+ if !p.cfg.RuntimeSecurity.EnforcementEnabled || (!p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled && !p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled) {
+ return
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ticker := time.NewTicker(disarmerCacheFlushInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ p.ruleDisarmersLock.Lock()
+ for _, disarmer := range p.ruleDisarmers {
+ disarmer.Lock()
+ var cLength, eLength int
+ if disarmer.containerCache != nil {
+ cLength = disarmer.containerCache.flush()
+ }
+ if disarmer.executableCache != nil {
+ eLength = disarmer.executableCache.flush()
+ }
+ if disarmer.disarmed && cLength == 0 && eLength == 0 {
+ disarmer.disarmed = false
+ disarmer.rearmedCount++
+ seclog.Infof("kill action of rule `%s` has been re-armed", disarmer.ruleID)
+ }
+ disarmer.Unlock()
+ }
+ p.ruleDisarmersLock.Unlock()
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+}
+
+type disarmerType string
+
+const (
+ containerDisarmer = disarmerType("container")
+ executableDisarmer = disarmerType("executable")
+)
+
+type killDisarmer struct {
+ sync.Mutex
+ disarmed bool
+ ruleID rules.RuleID
+ containerCache *disarmerCache[string, bool]
+ executableCache *disarmerCache[string, bool]
+ // stats
+ disarmedCount map[disarmerType]int64
+ rearmedCount int64
+}
+
+type disarmerCache[K comparable, V any] struct {
+ *ttlcache.Cache[K, V]
+ capacity uint64
+}
+
+func newDisarmerCache[K comparable, V any](capacity uint64, period time.Duration) *disarmerCache[K, V] {
+ cacheOpts := []ttlcache.Option[K, V]{
+ ttlcache.WithCapacity[K, V](capacity),
+ }
+
+ if period > 0 {
+ cacheOpts = append(cacheOpts, ttlcache.WithTTL[K, V](period))
+ }
+
+ return &disarmerCache[K, V]{
+ Cache: ttlcache.New[K, V](cacheOpts...),
+ capacity: capacity,
+ }
+}
+
+func (c *disarmerCache[K, V]) flush() int {
+ c.DeleteExpired()
+ return c.Len()
+}
+
+func newKillDisarmer(cfg *config.RuntimeSecurityConfig, ruleID rules.RuleID) *killDisarmer {
+ kd := &killDisarmer{
+ disarmed: false,
+ ruleID: ruleID,
+ disarmedCount: make(map[disarmerType]int64),
+ }
+
+ if cfg.EnforcementDisarmerContainerEnabled {
+ kd.containerCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerContainerMaxAllowed), cfg.EnforcementDisarmerContainerPeriod)
+ }
+
+ if cfg.EnforcementDisarmerExecutableEnabled {
+ kd.executableCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerExecutableMaxAllowed), cfg.EnforcementDisarmerExecutablePeriod)
+ }
+
+ return kd
+}
+
+func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], typ disarmerType, key string, onDisarm func()) bool {
+ kd.Lock()
+ defer kd.Unlock()
+
+ if cache == nil {
+ return true
+ }
+
+ cache.DeleteExpired()
+ // if the key is not in the cache, check if the new key causes the number of keys to exceed the capacity
+ // otherwise, the key is already in the cache and cache.Get will update its TTL
+ if cache.Get(key) == nil {
+ alreadyAtCapacity := uint64(cache.Len()) >= cache.capacity
+ cache.Set(key, true, ttlcache.DefaultTTL)
+ if alreadyAtCapacity && !kd.disarmed {
+ kd.disarmed = true
+ kd.disarmedCount[typ]++
+ onDisarm()
+ }
+ }
+
+ return !kd.disarmed
+}
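
killDisarmer.allow encodes the disarm policy: a rule tolerates up to capacity distinct containers or executables per period (tracked in a TTL cache), the key that pushes the count past capacity disarms the kill action, and the flush goroutine re-arms the rule once every cached entry has expired. A dependency-free simplification of the counting logic — the type and method names are illustrative:

package main

import (
	"fmt"
	"time"
)

// disarmer allows at most maxAllowed distinct keys per period; one more
// distinct key within the window flips it to disarmed.
type disarmer struct {
	seen       map[string]time.Time
	maxAllowed int
	period     time.Duration
	disarmed   bool
}

func (d *disarmer) allow(key string) bool {
	now := time.Now()
	for k, t := range d.seen {
		if now.Sub(t) > d.period {
			delete(d.seen, k) // expire old entries, like cache.DeleteExpired
		}
	}
	if _, ok := d.seen[key]; !ok {
		atCapacity := len(d.seen) >= d.maxAllowed
		d.seen[key] = now
		if atCapacity {
			d.disarmed = true
		}
	} else {
		d.seen[key] = now // known key: just refresh its TTL
	}
	return !d.disarmed
}

func main() {
	d := &disarmer{seen: map[string]time.Time{}, maxAllowed: 2, period: time.Minute}
	fmt.Println(d.allow("c1"), d.allow("c2"), d.allow("c3")) // true true false
}
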
diff --git a/pkg/security/rconfig/policies.go b/pkg/security/rconfig/policies.go
index c9bed099acaff..3d8576481e07a 100644
--- a/pkg/security/rconfig/policies.go
+++ b/pkg/security/rconfig/policies.go
@@ -41,12 +41,13 @@ type RCPolicyProvider struct {
lastCustoms map[string]state.RawConfig
debouncer *debouncer.Debouncer
dumpPolicies bool
+ setEnforcementCb func(bool)
}
var _ rules.PolicyProvider = (*RCPolicyProvider)(nil)
// NewRCPolicyProvider returns a new Remote Config based policy provider
-func NewRCPolicyProvider(dumpPolicies bool) (*RCPolicyProvider, error) {
+func NewRCPolicyProvider(dumpPolicies bool, setEnforcementCallback func(bool)) (*RCPolicyProvider, error) {
agentVersion, err := utils.GetAgentSemverVersion()
if err != nil {
return nil, fmt.Errorf("failed to parse agent version: %w", err)
@@ -68,8 +69,9 @@ func NewRCPolicyProvider(dumpPolicies bool) (*RCPolicyProvider, error) {
}
r := &RCPolicyProvider{
- client: c,
- dumpPolicies: dumpPolicies,
+ client: c,
+ dumpPolicies: dumpPolicies,
+ setEnforcementCb: setEnforcementCallback,
}
r.debouncer = debouncer.New(debounceDelay, r.onNewPoliciesReady)
@@ -82,12 +84,18 @@ func (r *RCPolicyProvider) Start() {
r.debouncer.Start()
- r.client.Subscribe(state.ProductCWSDD, r.rcDefaultsUpdateCallback)
- r.client.Subscribe(state.ProductCWSCustom, r.rcCustomsUpdateCallback)
+ r.client.SubscribeAll(state.ProductCWSDD, client.NewListener(r.rcDefaultsUpdateCallback, r.rcStateChanged))
+ r.client.SubscribeAll(state.ProductCWSCustom, client.NewListener(r.rcCustomsUpdateCallback, r.rcStateChanged))
r.client.Start()
}
+func (r *RCPolicyProvider) rcStateChanged(state bool) {
+ if r.setEnforcementCb != nil {
+ r.setEnforcementCb(state)
+ }
+}
+
func (r *RCPolicyProvider) rcDefaultsUpdateCallback(configs map[string]state.RawConfig, _ func(string, state.ApplyStatus)) {
r.Lock()
if len(r.lastDefaults) == 0 && len(configs) == 0 {
@@ -191,6 +199,10 @@ func (r *RCPolicyProvider) onNewPoliciesReady() {
defer r.RUnlock()
if r.onNewPoliciesReadyCb != nil {
+ if r.setEnforcementCb != nil {
+ r.setEnforcementCb(true)
+ }
+
r.onNewPoliciesReadyCb()
}
}
diff --git a/pkg/security/resolvers/hash/resolver_linux.go b/pkg/security/resolvers/hash/resolver_linux.go
index 48b0d30fa3acb..d4ab18efb6f8d 100644
--- a/pkg/security/resolvers/hash/resolver_linux.go
+++ b/pkg/security/resolvers/hash/resolver_linux.go
@@ -18,7 +18,6 @@ import (
"io/fs"
"os"
"slices"
- "syscall"
"github.com/DataDog/datadog-go/v5/statsd"
"github.com/glaslos/ssdeep"
@@ -215,18 +214,17 @@ type fileUniqKey struct {
}
func getFileInfo(path string) (fs.FileMode, int64, fileUniqKey, error) {
- fileInfo, err := os.Stat(path)
+ stat, err := utils.UnixStat(path)
if err != nil {
return 0, 0, fileUniqKey{}, err
}
- stat := fileInfo.Sys().(*syscall.Stat_t)
fkey := fileUniqKey{
dev: stat.Dev,
inode: stat.Ino,
}
- return fileInfo.Mode(), fileInfo.Size(), fkey, nil
+ return utils.UnixStatModeToGoFileMode(stat.Mode), stat.Size, fkey, nil
}
// hash hashes the provided file event
diff --git a/pkg/security/rules/autosuppression/autosuppression.go b/pkg/security/rules/autosuppression/autosuppression.go
index 168eb23087be6..56f9d0b28ce34 100644
--- a/pkg/security/rules/autosuppression/autosuppression.go
+++ b/pkg/security/rules/autosuppression/autosuppression.go
@@ -38,7 +38,9 @@ const (
// Opts holds options for auto suppression
type Opts struct {
+ SecurityProfileEnabled bool
SecurityProfileAutoSuppressionEnabled bool
+ ActivityDumpEnabled bool
ActivityDumpAutoSuppressionEnabled bool
EventTypes []model.EventType
}
@@ -68,7 +70,7 @@ func (as *AutoSuppression) Init(opts Opts) {
// Suppresses returns true if the event should be suppressed for the given rule, false otherwise. It also counts statistics depending on this result
func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool {
if isAllowAutosuppressionRule(rule) && event.ContainerContext.ContainerID != "" && slices.Contains(as.opts.EventTypes, event.GetEventType()) {
- if as.opts.ActivityDumpAutoSuppressionEnabled {
+ if as.opts.ActivityDumpEnabled && as.opts.ActivityDumpAutoSuppressionEnabled {
if event.HasActiveActivityDump() {
as.count(rule.ID, activityDumpSuppressionType)
return true
@@ -77,7 +79,7 @@ func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool
return true
}
}
- if as.opts.SecurityProfileAutoSuppressionEnabled {
+ if as.opts.SecurityProfileEnabled && as.opts.SecurityProfileAutoSuppressionEnabled {
if event.IsInProfile() {
as.count(rule.ID, securityProfileSuppressionType)
return true
diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go
index 67e48fa8845b2..2c24d48d230a8 100644
--- a/pkg/security/rules/engine.go
+++ b/pkg/security/rules/engine.go
@@ -88,7 +88,9 @@ func NewRuleEngine(evm *eventmonitor.EventMonitor, config *config.RuntimeSecurit
}
engine.AutoSuppression.Init(autosuppression.Opts{
+ SecurityProfileEnabled: config.SecurityProfileEnabled,
SecurityProfileAutoSuppressionEnabled: config.SecurityProfileAutoSuppressionEnabled,
+ ActivityDumpEnabled: config.ActivityDumpEnabled,
ActivityDumpAutoSuppressionEnabled: config.ActivityDumpAutoSuppressionEnabled,
EventTypes: config.SecurityProfileAutoSuppressionEventTypes,
})
@@ -360,7 +362,7 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider {
// add remote config as config provider if enabled.
if e.config.RemoteConfigurationEnabled {
- rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies)
+ rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies, e.rcStateCallback)
if err != nil {
seclog.Errorf("will be unable to load remote policies: %s", err)
} else {
@@ -378,6 +380,15 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider {
return policyProviders
}
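+// rcStateCallback logs remote config connectivity changes and toggles
+// enforcement on the probe accordingly.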
+func (e *RuleEngine) rcStateCallback(state bool) {
+ if state {
+ seclog.Infof("Connection to remote config established")
+ } else {
+ seclog.Infof("Connection to remote config lost")
+ }
+ e.probe.EnableEnforcement(state)
+}
+
// EventDiscarderFound is called by the ruleset when a new discarder is discovered

func (e *RuleEngine) EventDiscarderFound(rs *rules.RuleSet, event eval.Event, field eval.Field, eventType eval.EventType) {
if e.reloading.Load() {
diff --git a/pkg/security/secl/compiler/eval/variables.go b/pkg/security/secl/compiler/eval/variables.go
index 37802d9ed2649..3d0d865054276 100644
--- a/pkg/security/secl/compiler/eval/variables.go
+++ b/pkg/security/secl/compiler/eval/variables.go
@@ -13,7 +13,7 @@ import (
"regexp"
"time"
- "github.com/hashicorp/golang-lru/v2/expirable"
+ "github.com/jellydator/ttlcache/v3"
)
var (
@@ -304,7 +304,7 @@ func NewMutableStringVariable() *MutableStringVariable {
// MutableStringArrayVariable describes a mutable string array variable
type MutableStringArrayVariable struct {
- *expirable.LRU[string, bool]
+ LRU *ttlcache.Cache[string, bool]
}
// Set the variable with the specified value
@@ -314,7 +314,7 @@ func (m *MutableStringArrayVariable) Set(_ *Context, values interface{}) error {
}
for _, v := range values.([]string) {
- m.LRU.Add(v, true)
+ m.LRU.Set(v, true, ttlcache.DefaultTTL)
}
return nil
}
@@ -323,10 +323,10 @@ func (m *MutableStringArrayVariable) Set(_ *Context, values interface{}) error {
func (m *MutableStringArrayVariable) Append(_ *Context, value interface{}) error {
switch value := value.(type) {
case string:
- m.LRU.Add(value, true)
+ m.LRU.Set(value, true, ttlcache.DefaultTTL)
case []string:
for _, v := range value {
- m.LRU.Add(v, true)
+ m.LRU.Set(v, true, ttlcache.DefaultTTL)
}
default:
return errAppendNotSupported
@@ -349,7 +349,9 @@ func NewMutableStringArrayVariable(size int, ttl time.Duration) *MutableStringAr
size = defaultMaxVariables
}
- lru := expirable.NewLRU[string, bool](size, nil, ttl)
+ lru := ttlcache.New(ttlcache.WithCapacity[string, bool](uint64(size)), ttlcache.WithTTL[string, bool](ttl))
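+ // start the cache's background goroutine that evicts expired entries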
+ go lru.Start()
+
return &MutableStringArrayVariable{
LRU: lru,
}
@@ -433,7 +435,7 @@ func (v *GlobalVariables) GetVariable(_ string, value interface{}, opts Variable
// Variables holds a set of variables
type Variables struct {
- lru *expirable.LRU[string, interface{}]
+ lru *ttlcache.Cache[string, interface{}]
ttl time.Duration
}
@@ -444,36 +446,46 @@ func NewVariables() *Variables {
// GetBool returns the boolean value of the specified variable
func (v *Variables) GetBool(name string) bool {
- value, _ := v.lru.Get(name)
- bval, _ := value.(bool)
+ var bval bool
+ if item := v.lru.Get(name); item != nil {
+ bval, _ = item.Value().(bool)
+ }
return bval
}
// GetInt returns the integer value of the specified variable
func (v *Variables) GetInt(name string) int {
- value, _ := v.lru.Get(name)
- ival, _ := value.(int)
+ var ival int
+ if item := v.lru.Get(name); item != nil {
+ ival, _ = item.Value().(int)
+ }
return ival
}
// GetString returns the string value of the specified variable
func (v *Variables) GetString(name string) string {
- value, _ := v.lru.Get(name)
- sval, _ := value.(string)
+ var sval string
+ if item := v.lru.Get(name); item != nil {
+ sval, _ = item.Value().(string)
+ }
return sval
}
// GetStringArray returns the string array value of the specified variable
func (v *Variables) GetStringArray(name string) []string {
- value, _ := v.lru.Get(name)
- slval, _ := value.([]string)
+ var slval []string
+ if item := v.lru.Get(name); item != nil {
+ slval, _ = item.Value().([]string)
+ }
return slval
}
// GetIntArray returns the integer array value of the specified variable
func (v *Variables) GetIntArray(name string) []int {
- value, _ := v.lru.Get(name)
- ilval, _ := value.([]int)
+ var ilval []int
+ if item := v.lru.Get(name); item != nil {
+ ilval, _ = item.Value().([]int)
+ }
return ilval
}
@@ -483,12 +495,13 @@ const defaultMaxVariables = 100
func (v *Variables) Set(name string, value interface{}) bool {
existed := false
if v.lru == nil {
- v.lru = expirable.NewLRU[string, interface{}](defaultMaxVariables, nil, v.ttl)
+ v.lru = ttlcache.New(ttlcache.WithCapacity[string, interface{}](uint64(defaultMaxVariables)), ttlcache.WithTTL[string, interface{}](v.ttl))
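+ // start the background expiration goroutine for the newly created cache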
+ go v.lru.Start()
} else {
- _, existed = v.lru.Get(name)
+ existed = v.lru.Get(name) != nil
}
- v.lru.Add(name, value)
+ v.lru.Set(name, value, ttlcache.DefaultTTL)
return !existed
}
diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod
index b4da8135a20ad..a46de50a3a0b7 100644
--- a/pkg/security/secl/go.mod
+++ b/pkg/security/secl/go.mod
@@ -12,15 +12,18 @@ require (
github.com/google/go-cmp v0.6.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/jellydator/ttlcache/v3 v3.3.0
github.com/skydive-project/go-debouncer v1.0.0
github.com/spf13/cast v1.7.0
github.com/stretchr/testify v1.9.0
- golang.org/x/sys v0.24.0
- golang.org/x/text v0.17.0
- golang.org/x/tools v0.24.0
+ github.com/xeipuuv/gojsonschema v1.2.0
+ golang.org/x/sys v0.25.0
+ golang.org/x/text v0.18.0
+ golang.org/x/tools v0.25.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
modernc.org/mathutil v1.6.0
+ sigs.k8s.io/yaml v1.4.0
)
require (
@@ -34,8 +37,10 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/shopspring/decimal v1.4.0 // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/mod v0.20.0 // indirect
+ golang.org/x/mod v0.21.0 // indirect
golang.org/x/sync v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
)
diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum
index b0fa6b12c6903..d5c938d6789d2 100644
--- a/pkg/security/secl/go.sum
+++ b/pkg/security/secl/go.sum
@@ -18,6 +18,7 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -30,6 +31,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -54,21 +57,30 @@ github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMs
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
-golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
-golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
+golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -79,3 +91,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/pkg/security/secl/rules/model.go b/pkg/security/secl/rules/model.go
index d347ec32a895f..30783522b272c 100644
--- a/pkg/security/secl/rules/model.go
+++ b/pkg/security/secl/rules/model.go
@@ -41,18 +41,18 @@ const (
// OverrideOptions defines combine options
type OverrideOptions struct {
- Fields []OverrideField `yaml:"fields"`
+ Fields []OverrideField `yaml:"fields" json:"fields" jsonschema:"enum=all,enum=expression,enum=actions,enum=every,enum=tags"`
}
// MacroDefinition holds the definition of a macro
type MacroDefinition struct {
- ID MacroID `yaml:"id"`
- Expression string `yaml:"expression"`
- Description string `yaml:"description"`
- AgentVersionConstraint string `yaml:"agent_version"`
- Filters []string `yaml:"filters"`
- Values []string `yaml:"values"`
- Combine CombinePolicy `yaml:"combine"`
+ ID MacroID `yaml:"id" json:"id"`
+ Expression string `yaml:"expression" json:"expression,omitempty" jsonschema:"oneof_required=MacroWithExpression"`
+ Description string `yaml:"description" json:"description,omitempty"`
+ AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"`
+ Filters []string `yaml:"filters" json:"filters,omitempty"`
+ Values []string `yaml:"values" json:"values,omitempty" jsonschema:"oneof_required=MacroWithValues"`
+ Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=merge,enum=override"`
}
// RuleID represents the ID of a rule
@@ -60,20 +60,20 @@ type RuleID = string
// RuleDefinition holds the definition of a rule
type RuleDefinition struct {
- ID RuleID `yaml:"id"`
- Version string `yaml:"version"`
- Expression string `yaml:"expression"`
- Description string `yaml:"description"`
- Tags map[string]string `yaml:"tags"`
- AgentVersionConstraint string `yaml:"agent_version"`
- Filters []string `yaml:"filters"`
- Disabled bool `yaml:"disabled"`
- Combine CombinePolicy `yaml:"combine"`
- OverrideOptions OverrideOptions `yaml:"override_options"`
- Actions []*ActionDefinition `yaml:"actions"`
- Every time.Duration `yaml:"every"`
- Silent bool `yaml:"silent"`
- GroupID string `yaml:"group_id"`
+ ID RuleID `yaml:"id" json:"id"`
+ Version string `yaml:"version" json:"version,omitempty"`
+ Expression string `yaml:"expression" json:"expression,omitempty"`
+ Description string `yaml:"description" json:"description,omitempty"`
+ Tags map[string]string `yaml:"tags" json:"tags,omitempty"`
+ AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"`
+ Filters []string `yaml:"filters" json:"filters,omitempty"`
+ Disabled bool `yaml:"disabled" json:"disabled,omitempty"`
+ Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=override"`
+ OverrideOptions OverrideOptions `yaml:"override_options" json:"override_options,omitempty"`
+ Actions []*ActionDefinition `yaml:"actions" json:"actions,omitempty"`
+ Every time.Duration `yaml:"every" json:"every,omitempty"`
+ Silent bool `yaml:"silent" json:"silent,omitempty"`
+ GroupID string `yaml:"group_id" json:"group_id,omitempty"`
}
// GetTag returns the tag value associated with a tag key
@@ -95,11 +95,11 @@ const (
// ActionDefinition describes a rule action section
type ActionDefinition struct {
- Filter *string `yaml:"filter"`
- Set *SetDefinition `yaml:"set"`
- Kill *KillDefinition `yaml:"kill"`
- CoreDump *CoreDumpDefinition `yaml:"coredump"`
- Hash *HashDefinition `yaml:"hash"`
+ Filter *string `yaml:"filter" json:"filter,omitempty"`
+ Set *SetDefinition `yaml:"set" json:"set,omitempty" jsonschema:"oneof_required=SetAction"`
+ Kill *KillDefinition `yaml:"kill" json:"kill,omitempty" jsonschema:"oneof_required=KillAction"`
+ CoreDump *CoreDumpDefinition `yaml:"coredump" json:"coredump,omitempty" jsonschema:"oneof_required=CoreDumpAction"`
+ Hash *HashDefinition `yaml:"hash" json:"hash,omitempty" jsonschema:"oneof_required=HashAction"`
}
// Scope describes the scope variables
@@ -107,27 +107,27 @@ type Scope string
// SetDefinition describes the 'set' section of a rule action
type SetDefinition struct {
- Name string `yaml:"name"`
- Value interface{} `yaml:"value"`
- Field string `yaml:"field"`
- Append bool `yaml:"append"`
- Scope Scope `yaml:"scope"`
- Size int `yaml:"size"`
- TTL time.Duration `yaml:"ttl"`
+ Name string `yaml:"name" json:"name"`
+ Value interface{} `yaml:"value" json:"value,omitempty" jsonschema:"oneof_required=SetWithValue"`
+ Field string `yaml:"field" json:"field,omitempty" jsonschema:"oneof_required=SetWithField"`
+ Append bool `yaml:"append" json:"append,omitempty"`
+ Scope Scope `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"`
+ Size int `yaml:"size" json:"size,omitempty"`
+ TTL time.Duration `yaml:"ttl" json:"ttl,omitempty"`
}
// KillDefinition describes the 'kill' section of a rule action
type KillDefinition struct {
- Signal string `yaml:"signal"`
- Scope string `yaml:"scope"`
+ Signal string `yaml:"signal" json:"signal" jsonschema:"description=A valid signal name,example=SIGKILL,example=SIGTERM"`
+ Scope string `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"`
}
// CoreDumpDefinition describes the 'coredump' action
type CoreDumpDefinition struct {
- Process bool `yaml:"process"`
- Mount bool `yaml:"mount"`
- Dentry bool `yaml:"dentry"`
- NoCompression bool `yaml:"no_compression"`
+ Process bool `yaml:"process" json:"process,omitempty" jsonschema:"anyof_required=CoreDumpWithProcess"`
+ Mount bool `yaml:"mount" json:"mount,omitempty" jsonschema:"anyof_required=CoreDumpWithMount"`
+ Dentry bool `yaml:"dentry" json:"dentry,omitempty" jsonschema:"anyof_required=CoreDumpWithDentry"`
+ NoCompression bool `yaml:"no_compression" json:"no_compression,omitempty"`
}
// HashDefinition describes the 'hash' section of a rule action
@@ -135,21 +135,21 @@ type HashDefinition struct{}
// OnDemandHookPoint represents a hook point definition
type OnDemandHookPoint struct {
- Name string `yaml:"name"`
- IsSyscall bool `yaml:"syscall"`
- Args []HookPointArg `yaml:"args"`
+ Name string `yaml:"name" json:"name"`
+ IsSyscall bool `yaml:"syscall" json:"syscall,omitempty"`
+ Args []HookPointArg `yaml:"args" json:"args,omitempty"`
}
// HookPointArg represents the definition of a hook point argument
type HookPointArg struct {
- N int `yaml:"n"`
- Kind string `yaml:"kind"`
+ N int `yaml:"n" json:"n" jsonschema:"description=Zero-based argument index"`
+ Kind string `yaml:"kind" json:"kind" jsonschema:"enum=uint,enum=null-terminated-string"`
}
// PolicyDef represents a policy file definition
type PolicyDef struct {
- Version string `yaml:"version"`
- Macros []*MacroDefinition `yaml:"macros"`
- Rules []*RuleDefinition `yaml:"rules"`
- OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks"`
+ Version string `yaml:"version" json:"version"`
+ Macros []*MacroDefinition `yaml:"macros" json:"macros,omitempty"`
+ Rules []*RuleDefinition `yaml:"rules" json:"rules"`
+ OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks" json:"hooks,omitempty"`
}
diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go
index 8b7e44ebb734b..286b0fc1036da 100644
--- a/pkg/security/secl/rules/policy_test.go
+++ b/pkg/security/secl/rules/policy_test.go
@@ -10,6 +10,7 @@ package rules
import (
"fmt"
+ "net/http"
"os"
"path/filepath"
"strings"
@@ -18,11 +19,14 @@ import (
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/xeipuuv/gojsonschema"
"github.com/Masterminds/semver/v3"
"github.com/hashicorp/go-multierror"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
+ yamlk8s "sigs.k8s.io/yaml"
"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
@@ -342,9 +346,9 @@ func TestActionSetVariableTTL(t *testing.T) {
assert.NotNil(t, stringArrayVar)
assert.True(t, ok)
- assert.True(t, stringArrayVar.Contains("foo"))
+ assert.True(t, stringArrayVar.LRU.Has("foo"))
time.Sleep(time.Second + 100*time.Millisecond)
- assert.False(t, stringArrayVar.Contains("foo"))
+ assert.False(t, stringArrayVar.LRU.Has("foo"))
}
func TestActionSetVariableConflict(t *testing.T) {
@@ -928,3 +932,187 @@ broken
})
}
}
+
+// go test -v github.com/DataDog/datadog-agent/pkg/security/secl/rules --run="TestPolicySchema"
+func TestPolicySchema(t *testing.T) {
+ tests := []struct {
+ name string
+ policy string
+ schemaResultCb func(*testing.T, *gojsonschema.Result)
+ }{
+ {
+ name: "valid",
+ policy: policyValid,
+ schemaResultCb: func(t *testing.T, result *gojsonschema.Result) {
+ if !assert.True(t, result.Valid(), "schema validation failed") {
+ for _, err := range result.Errors() {
+ t.Errorf("%s", err)
+ }
+ }
+ },
+ },
+ {
+ name: "missing required rule ID",
+ policy: policyWithMissingRequiredRuleID,
+ schemaResultCb: func(t *testing.T, result *gojsonschema.Result) {
+ require.False(t, result.Valid(), "schema validation should fail")
+ require.Len(t, result.Errors(), 1)
+ assert.Contains(t, result.Errors()[0].String(), "id is required")
+ },
+ },
+ {
+ name: "unknown field",
+ policy: policyWithUnknownField,
+ schemaResultCb: func(t *testing.T, result *gojsonschema.Result) {
+ require.False(t, result.Valid(), "schema validation should fail")
+ require.Len(t, result.Errors(), 1)
+ assert.Contains(t, result.Errors()[0].String(), "Additional property unknown_field is not allowed")
+ },
+ },
+ {
+ name: "invalid field type",
+ policy: policyWithInvalidFieldType,
+ schemaResultCb: func(t *testing.T, result *gojsonschema.Result) {
+ require.False(t, result.Valid(), "schema validation should fail")
+ require.Len(t, result.Errors(), 1)
+ assert.Contains(t, result.Errors()[0].String(), "Invalid type")
+ },
+ },
+ {
+ name: "multiple actions",
+ policy: policyWithMultipleActions,
+ schemaResultCb: func(t *testing.T, result *gojsonschema.Result) {
+ require.False(t, result.Valid(), "schema validation should fail")
+ require.Len(t, result.Errors(), 1)
+ assert.Contains(t, result.Errors()[0].String(), "Must validate one and only one schema")
+ },
+ },
+ }
+
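+ // load the policy schema from pkg/security/tests/schemas; http.FS adapts the
+ // directory so the file:/// reference URL can be resolved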
+ fs := os.DirFS("../../../../pkg/security/tests/schemas")
+ schemaLoader := gojsonschema.NewReferenceLoaderFileSystem("file:///policy.schema.json", http.FS(fs))
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
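+ // policies are authored in YAML, so convert to JSON before handing them to the JSON schema validator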
+ json, err := yamlk8s.YAMLToJSON([]byte(test.policy))
+ require.NoErrorf(t, err, "failed to convert yaml to json: %v", err)
+ documentLoader := gojsonschema.NewBytesLoader(json)
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+ require.NoErrorf(t, err, "failed to validate schema: %v", err)
+ test.schemaResultCb(t, result)
+ })
+ }
+}
+
+const policyValid = `
+version: 1.2.3
+rules:
+ - id: basic
+ expression: exec.file.name == "foo"
+ - id: with_tags
+ description: Rule with tags
+ expression: exec.file.name == "foo"
+ tags:
+ tagA: a
+ tagB: b
+ - id: disabled
+ description: Disabled rule
+ expression: exec.file.name == "foo"
+ disabled: true
+ - id: with_combine
+ description: Rule with combine
+ expression: exec.file.name == "bar"
+ combine: override
+ override_options:
+ fields:
+ - expression
+ - id: with_filters
+ description: Rule with a filter and agent_version field
+ expression: exec.file.name == "foo"
+ agent_version: ">= 7.38"
+ filters:
+ - os == "linux"
+ - id: with_every_silent_group_id
+ description: Rule with a silent/every/group_id field
+ expression: exec.file.name == "foo"
+ silent: true
+ every: 10s
+ group_id: "baz_group"
+ - id: with_set_action_with_field
+ description: Rule with a set action using an event field
+ expression: exec.file.name == "foo"
+ actions:
+ - set:
+ name: process_names
+ field: process.file.name
+ append: true
+ size: 10
+ ttl: 10s
+ - id: with_set_action_with_value
+ description: Rule with a set action using a value
+ expression: exec.file.name == "foo"
+ actions:
+ - set:
+ name: global_var_set
+ value: true
+ - id: with_set_action_use
+ description: Rule using a variable set by a previous action
+ expression: open.file.path == "/tmp/bar" && ${global_var_set}
+ - id: with_kill_action
+ description: Rule with a kill action
+ expression: exec.file.name == "foo"
+ actions:
+ - kill:
+ signal: SIGKILL
+ scope: process
+ - id: with_coredump_action
+ description: Rule with a coredump action
+ expression: exec.file.name == "foo"
+ actions:
+ - coredump:
+ process: true
+ dentry: true
+ mount: true
+ no_compression: true
+ - id: with_hash_action
+ description: Rule with a hash action
+ expression: exec.file.name == "foo"
+ actions:
+ - hash: {}
+`
+const policyWithMissingRequiredRuleID = `
+version: 1.2.3
+rules:
+ - description: Rule with missing ID
+ expression: exec.file.name == "foo"
+`
+
+const policyWithUnknownField = `
+version: 1.2.3
+rules:
+ - id: rule with unknown field
+ expression: exec.file.name == "foo"
+ unknown_field: "bar"
+`
+
+const policyWithInvalidFieldType = `
+version: 1.2.3
+rules:
+ - id: 2
+ expression: exec.file.name == "foo"
+`
+
+const policyWithMultipleActions = `
+version: 1.2.3
+rules:
+ - id: rule with multiple actions
+ expression: exec.file.name == "foo"
+ actions:
+ - set:
+ name: global_var_set
+ value: true
+ kill:
+ signal: SIGKILL
+ scope: process
+`
diff --git a/pkg/security/seclwin/go.mod b/pkg/security/seclwin/go.mod
index 87d480ed5940e..dc91e07840f51 100644
--- a/pkg/security/seclwin/go.mod
+++ b/pkg/security/seclwin/go.mod
@@ -11,6 +11,7 @@ require (
require (
github.com/alecthomas/participle v0.7.1 // indirect
- github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+ github.com/jellydator/ttlcache/v3 v3.3.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ golang.org/x/sync v0.8.0 // indirect
)
diff --git a/pkg/security/seclwin/go.sum b/pkg/security/seclwin/go.sum
index c8f0a2d739e33..39492baacd3ec 100644
--- a/pkg/security/seclwin/go.sum
+++ b/pkg/security/seclwin/go.sum
@@ -4,8 +4,8 @@ github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1p
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@@ -14,6 +14,10 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/pkg/security/security_profile/dump/local_storage.go b/pkg/security/security_profile/dump/local_storage.go
index b47e7266a8e83..a2d3af607eb08 100644
--- a/pkg/security/security_profile/dump/local_storage.go
+++ b/pkg/security/security_profile/dump/local_storage.go
@@ -85,7 +85,9 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac
// remove everything
for _, f := range *files {
- _ = os.Remove(f)
+ if err := os.Remove(path.Join(cfg.RuntimeSecurity.ActivityDumpLocalStorageDirectory, f)); err != nil {
+ seclog.Warnf("Failed to remove dump %s (limit of dumps reach): %v", f, err)
+ }
}
adls.deletedCount.Add(1)
@@ -118,8 +120,18 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac
// ignore this file
continue
}
+ // fetch MTime
+ dumpInfo, err := f.Info()
+ if err != nil {
+ seclog.Warnf("Failed to retrieve dump %s file informations: %v", f.Name(), err)
+ // ignore this file
+ continue
+ }
// retrieve the basename of the dump
dumpName := strings.TrimSuffix(filepath.Base(f.Name()), ext)
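+ // compressed dumps carry a double extension, so strip the remaining inner one to recover the dump name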
+ if ext == ".gz" {
+ dumpName = strings.TrimSuffix(dumpName, filepath.Ext(dumpName))
+ }
// insert the file in the list of dumps
ad, ok := localDumps[dumpName]
if !ok {
@@ -130,11 +142,6 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac
localDumps[dumpName] = ad
}
ad.Files = append(ad.Files, f.Name())
- dumpInfo, err := f.Info()
- if err != nil {
- // ignore this file
- continue
- }
if !ad.MTime.IsZero() && ad.MTime.Before(dumpInfo.ModTime()) {
ad.MTime = dumpInfo.ModTime()
}
@@ -144,8 +151,7 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac
sort.Sort(dumps)
// insert the dumps in cache (will trigger clean up if necessary)
for _, ad := range dumps {
- newFiles := ad.Files
- adls.localDumps.Add(ad.Name, &newFiles)
+ adls.localDumps.Add(ad.Name, &ad.Files)
}
}
diff --git a/pkg/security/agent/telemetry_linux.go b/pkg/security/telemetry/containers_running_telemetry_linux.go
similarity index 50%
rename from pkg/security/agent/telemetry_linux.go
rename to pkg/security/telemetry/containers_running_telemetry_linux.go
index abd606a35b400..3a325b7efa469 100644
--- a/pkg/security/agent/telemetry_linux.go
+++ b/pkg/security/telemetry/containers_running_telemetry_linux.go
@@ -3,49 +3,42 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package agent holds agent related files
-package agent
+package telemetry
import (
"context"
- "errors"
"os"
"time"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ "github.com/DataDog/datadog-agent/pkg/security/config"
"github.com/DataDog/datadog-agent/pkg/security/metrics"
- "github.com/DataDog/datadog-agent/pkg/security/proto/api"
- sectelemetry "github.com/DataDog/datadog-agent/pkg/security/telemetry"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-go/v5/statsd"
)
-// telemetry reports environment information (e.g containers running) when the runtime security component is running
-type telemetry struct {
- containers *sectelemetry.ContainersTelemetry
- runtimeSecurityClient *RuntimeSecurityClient
+// ContainersRunningTelemetry reports environment information (e.g. containers running) when the runtime security component is running
+type ContainersRunningTelemetry struct {
+ cfg *config.RuntimeSecurityConfig
+ containers *ContainersTelemetry
}
-func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component, ignoreDDAgentContainers bool) (*telemetry, error) {
- runtimeSecurityClient, err := NewRuntimeSecurityClient()
+// NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance
+func NewContainersRunningTelemetry(cfg *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*ContainersRunningTelemetry, error) {
+ telemetrySender := NewSimpleTelemetrySenderFromStatsd(statsdClient)
+ containersTelemetry, err := NewContainersTelemetry(telemetrySender, wmeta)
if err != nil {
return nil, err
}
- telemetrySender := sectelemetry.NewSimpleTelemetrySenderFromStatsd(statsdClient)
- containersTelemetry, err := sectelemetry.NewContainersTelemetry(telemetrySender, wmeta)
- if err != nil {
- return nil, err
- }
- containersTelemetry.IgnoreDDAgent = ignoreDDAgentContainers
-
- return &telemetry{
- containers: containersTelemetry,
- runtimeSecurityClient: runtimeSecurityClient,
+ return &ContainersRunningTelemetry{
+ cfg: cfg,
+ containers: containersTelemetry,
}, nil
}
-func (t *telemetry) run(ctx context.Context) {
+// Run starts the telemetry collection
+func (t *ContainersRunningTelemetry) Run(ctx context.Context) {
log.Info("started collecting Runtime Security Agent telemetry")
defer log.Info("stopping Runtime Security Agent telemetry")
@@ -64,33 +57,19 @@ func (t *telemetry) run(ctx context.Context) {
}
}
-func (t *telemetry) fetchConfig() (*api.SecurityConfigMessage, error) {
- cfg, err := t.runtimeSecurityClient.GetConfig()
- if err != nil {
- return cfg, errors.New("couldn't fetch config from runtime security module")
- }
- return cfg, nil
-}
-
-func (t *telemetry) reportContainers() error {
- // retrieve the runtime security module config
- cfg, err := t.fetchConfig()
- if err != nil {
- return err
- }
-
+func (t *ContainersRunningTelemetry) reportContainers() error {
var fargate bool
if os.Getenv("ECS_FARGATE") == "true" || os.Getenv("DD_ECS_FARGATE") == "true" || os.Getenv("DD_EKS_FARGATE") == "true" {
fargate = true
}
var metricName string
- if cfg.RuntimeEnabled {
+ if t.cfg.RuntimeEnabled {
metricName = metrics.MetricSecurityAgentRuntimeContainersRunning
if fargate {
metricName = metrics.MetricSecurityAgentFargateRuntimeContainersRunning
}
- } else if cfg.FIMEnabled {
+ } else if t.cfg.FIMEnabled {
metricName = metrics.MetricSecurityAgentFIMContainersRunning
if fargate {
metricName = metrics.MetricSecurityAgentFargateFIMContainersRunning
diff --git a/pkg/security/telemetry/containers_running_telemetry_others.go b/pkg/security/telemetry/containers_running_telemetry_others.go
new file mode 100644
index 0000000000000..3bb9658228d9a
--- /dev/null
+++ b/pkg/security/telemetry/containers_running_telemetry_others.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !linux
+
+package telemetry
+
+import (
+ "context"
+
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ "github.com/DataDog/datadog-agent/pkg/security/config"
+ "github.com/DataDog/datadog-go/v5/statsd"
+)
+
+// ContainersRunningTelemetry reports environment information (e.g. containers running) when the runtime security component is running
+type ContainersRunningTelemetry struct{}
+
+// NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance (not supported on non-linux platforms)
+func NewContainersRunningTelemetry(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*ContainersRunningTelemetry, error) {
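+ // containers-running telemetry is only supported on linux; callers must handle the nil instance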
+ return nil, nil
+}
+
+// Run starts the telemetry collection
+func (t *ContainersRunningTelemetry) Run(_ context.Context) {}
diff --git a/pkg/security/telemetry/telemetry.go b/pkg/security/telemetry/telemetry.go
index 70ba2ca19a1c9..71985fb7b4b97 100644
--- a/pkg/security/telemetry/telemetry.go
+++ b/pkg/security/telemetry/telemetry.go
@@ -18,7 +18,6 @@ import (
type ContainersTelemetry struct {
TelemetrySender SimpleTelemetrySender
MetadataStore workloadmeta.Component
- IgnoreDDAgent bool
}
// NewContainersTelemetry returns a new ContainersTelemetry based on default/global objects
@@ -40,13 +39,12 @@ func (c *ContainersTelemetry) ReportContainers(metricName string) {
containers := c.ListRunningContainers()
for _, container := range containers {
- if c.IgnoreDDAgent {
- value := container.EnvVars["DOCKER_DD_AGENT"]
- value = strings.ToLower(value)
- if value == "yes" || value == "true" {
- log.Debugf("ignoring container: name=%s id=%s image_id=%s", container.Name, container.ID, container.Image.ID)
- continue
- }
+ // ignore DD agent containers
+ value := container.EnvVars["DOCKER_DD_AGENT"]
+ value = strings.ToLower(value)
+ if value == "yes" || value == "true" {
+ log.Debugf("ignoring container: name=%s id=%s image_id=%s", container.Name, container.ID, container.Image.ID)
+ continue
}
c.TelemetrySender.Gauge(metricName, 1.0, []string{"container_id:" + container.ID})
diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go
index 8a9f9b6084ac1..5214d6961619c 100644
--- a/pkg/security/tests/action_test.go
+++ b/pkg/security/tests/action_test.go
@@ -354,3 +354,212 @@ func TestActionKillRuleSpecific(t *testing.T) {
}, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay))
assert.NoError(t, err)
}
+
+func TestActionKillDisarm(t *testing.T) {
+ SkipIfNotAvailable(t)
+
+ if testEnvironment == DockerEnvironment {
+ t.Skip("Skip test spawning docker containers on docker")
+ }
+
+ if _, err := whichNonFatal("docker"); err != nil {
+ t.Skip("Skip test where docker is unavailable")
+ }
+
+ checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool {
+ return !kv.SupportBPFSendSignal() && env.IsContainerized()
+ })
+
+ ruleDefs := []*rules.RuleDefinition{
+ {
+ ID: "kill_action_disarm_executable",
+ Expression: `exec.envs in ["TARGETTOKILL"] && container.id == ""`,
+ Actions: []*rules.ActionDefinition{
+ {
+ Kill: &rules.KillDefinition{
+ Signal: "SIGKILL",
+ },
+ },
+ },
+ },
+ {
+ ID: "kill_action_disarm_container",
+ Expression: `exec.envs in ["TARGETTOKILL"] && container.id != ""`,
+ Actions: []*rules.ActionDefinition{
+ {
+ Kill: &rules.KillDefinition{
+ Signal: "SIGKILL",
+ },
+ },
+ },
+ },
+ }
+
+ sleep := which(t, "sleep")
+ const (
+ enforcementDisarmerContainerPeriod = 10 * time.Second
+ enforcementDisarmerExecutablePeriod = 10 * time.Second
+ )
+
+ test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{
+ enforcementDisarmerContainerEnabled: true,
+ enforcementDisarmerContainerMaxAllowed: 1,
+ enforcementDisarmerContainerPeriod: enforcementDisarmerContainerPeriod,
+ enforcementDisarmerExecutableEnabled: true,
+ enforcementDisarmerExecutableMaxAllowed: 1,
+ enforcementDisarmerExecutablePeriod: enforcementDisarmerExecutablePeriod,
+ eventServerRetention: 1 * time.Nanosecond,
+ }))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer test.Close()
+
+ syscallTester, err := loadSyscallTester(t, test, "syscall_tester")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testKillActionSuccess := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) {
+ test.msgSender.flush()
+ err := test.GetEventSent(t, func() error {
+ ch := make(chan bool, 1)
+
+ go func() {
+ timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ cmdFunc(timeoutCtx)
+
+ ch <- true
+ }()
+
+ select {
+ case <-ch:
+ case <-time.After(time.Second * 3):
+ t.Error("signal timeout")
+ }
+ return nil
+ }, func(_ *rules.Rule, _ *model.Event) bool {
+ return true
+ }, time.Second*5, ruleID)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = retry.Do(func() error {
+ msg := test.msgSender.getMsg(ruleID)
+ if msg == nil {
+ return errors.New("not found")
+ }
+ validateMessageSchema(t, string(msg.Data))
+
+ jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) {
+ if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil {
+ t.Error(err)
+ }
+ if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil {
+ t.Error(err)
+ }
+ })
+
+ return nil
+ }, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay))
+ assert.NoError(t, err)
+ }
+
+ testKillActionIgnored := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) {
+ test.msgSender.flush()
+ err := test.GetEventSent(t, func() error {
+ cmdFunc(nil)
+ return nil
+ }, func(_ *rules.Rule, _ *model.Event) bool {
+ return true
+ }, time.Second*5, ruleID)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = retry.Do(func() error {
+ msg := test.msgSender.getMsg(ruleID)
+ if msg == nil {
+ return errors.New("not found")
+ }
+ validateMessageSchema(t, string(msg.Data))
+
+ jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) {
+ if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil {
+ t.Error(errors.New("unexpected rule action"))
+ }
+ })
+
+ return nil
+ }, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay))
+ assert.NoError(t, err)
+ }
+
+ t.Run("executable", func(t *testing.T) {
+ // test that we can kill processes with the same executable more than once
+ for i := 0; i < 2; i++ {
+ t.Logf("test iteration %d", i)
+ testKillActionSuccess(t, "kill_action_disarm_executable", func(ctx context.Context) {
+ cmd := exec.CommandContext(ctx, syscallTester, "sleep", "5")
+ cmd.Env = []string{"TARGETTOKILL=1"}
+ _ = cmd.Run()
+ })
+ }
+
+ // test that another executable disarms the kill action
+ testKillActionIgnored(t, "kill_action_disarm_executable", func(_ context.Context) {
+ cmd := exec.Command(sleep, "1")
+ cmd.Env = []string{"TARGETTOKILL=1"}
+ _ = cmd.Run()
+ })
+
+ // test that the kill action is re-armed after both executable cache entries have expired
+ // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed
+ time.Sleep(enforcementDisarmerExecutablePeriod + 5*time.Second + 1*time.Second)
+ testKillActionSuccess(t, "kill_action_disarm_executable", func(_ context.Context) {
+ cmd := exec.Command(sleep, "1")
+ cmd.Env = []string{"TARGETTOKILL=1"}
+ _ = cmd.Run()
+ })
+ })
+
+ t.Run("container", func(t *testing.T) {
+ dockerInstance, err := test.StartADocker()
+ if err != nil {
+ t.Fatalf("failed to start a Docker instance: %v", err)
+ }
+ defer dockerInstance.stop()
+
+ // test that we can kill processes within the same container more than once
+ for i := 0; i < 2; i++ {
+ t.Logf("test iteration %d", i)
+ testKillActionSuccess(t, "kill_action_disarm_container", func(_ context.Context) {
+ cmd := dockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "5"}, []string{})
+ _ = cmd.Run()
+ })
+ }
+
+ newDockerInstance, err := test.StartADocker()
+ if err != nil {
+ t.Fatalf("failed to start a second Docker instance: %v", err)
+ }
+ defer newDockerInstance.stop()
+
+ // test that another container disarms the kill action
+ testKillActionIgnored(t, "kill_action_disarm_container", func(_ context.Context) {
+ cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "1"}, []string{})
+ _ = cmd.Run()
+ })
+
+ // test that the kill action is re-armed after both container cache entries have expired
+ // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed
+ time.Sleep(enforcementDisarmerContainerPeriod + 5*time.Second + 1*time.Second)
+ testKillActionSuccess(t, "kill_action_disarm_container", func(_ context.Context) {
+ cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "5"}, []string{})
+ _ = cmd.Run()
+ })
+ })
+}
diff --git a/pkg/security/tests/file_windows_test.go b/pkg/security/tests/file_windows_test.go
index 491fa3ea6cab4..15b7c57e7b0a3 100644
--- a/pkg/security/tests/file_windows_test.go
+++ b/pkg/security/tests/file_windows_test.go
@@ -223,7 +223,7 @@ func TestWriteFileEventWithCreate(t *testing.T) {
}
return f.Close()
}, test.validateFileEvent(t, noWrapperType, func(event *model.Event, rule *rules.Rule) {
- assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", event, "write.file.name file didn't match")
+ assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", "write.file.name file didn't match")
}))
})
}
diff --git a/pkg/security/tests/filters_test.go b/pkg/security/tests/filters_test.go
index b9dd3e8d29914..74486100f4470 100644
--- a/pkg/security/tests/filters_test.go
+++ b/pkg/security/tests/filters_test.go
@@ -102,6 +102,16 @@ func TestFilterOpenBasenameApprover(t *testing.T) {
}, testFile2); err == nil {
t.Fatal("shouldn't get an event")
}
+
+ if err := waitForOpenProbeEvent(test, func() error {
+ fd2, err = openTestFile(test, testFile2, syscall.O_RDONLY)
+ if err != nil {
+ return err
+ }
+ return syscall.Close(fd2)
+ }, testFile2); err == nil {
+ t.Fatal("shouldn't get an event")
+ }
}
func TestFilterOpenLeafDiscarder(t *testing.T) {
diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go
index f503c3090829e..7de491a609fb9 100644
--- a/pkg/security/tests/main_linux.go
+++ b/pkg/security/tests/main_linux.go
@@ -101,6 +101,7 @@ func SkipIfNotAvailable(t *testing.T) {
"TestLoginUID/login-uid-open-test",
"TestLoginUID/login-uid-exec-test",
"TestActionKillExcludeBinary",
+ "TestActionKillDisarm",
}
if disableSeccomp {
diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go
index bee5088cccb82..dcd853df9e258 100644
--- a/pkg/security/tests/module_tester.go
+++ b/pkg/security/tests/module_tester.go
@@ -27,6 +27,9 @@ import (
"unsafe"
spconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ "go.uber.org/fx"
emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config"
secconfig "github.com/DataDog/datadog-agent/pkg/security/config"
@@ -775,6 +778,13 @@ func genTestConfigs(cfgDir string, opts testOpts) (*emconfig.Config, *secconfig.
"NetworkIngressEnabled": opts.networkIngressEnabled,
"OnDemandRateLimiterEnabled": !opts.disableOnDemandRateLimiter,
"EnforcementExcludeBinary": opts.enforcementExcludeBinary,
+ "EnforcementDisarmerContainerEnabled": opts.enforcementDisarmerContainerEnabled,
+ "EnforcementDisarmerContainerMaxAllowed": opts.enforcementDisarmerContainerMaxAllowed,
+ "EnforcementDisarmerContainerPeriod": opts.enforcementDisarmerContainerPeriod,
+ "EnforcementDisarmerExecutableEnabled": opts.enforcementDisarmerExecutableEnabled,
+ "EnforcementDisarmerExecutableMaxAllowed": opts.enforcementDisarmerExecutableMaxAllowed,
+ "EnforcementDisarmerExecutablePeriod": opts.enforcementDisarmerExecutablePeriod,
+ "EventServerRetention": opts.eventServerRetention,
}); err != nil {
return nil, nil, err
}
@@ -832,7 +842,7 @@ type fakeMsgSender struct {
msgs map[eval.RuleID]*api.SecurityEventMessage
}
-func (fs *fakeMsgSender) Send(msg *api.SecurityEventMessage, expireFnc func(*api.SecurityEventMessage)) {
+func (fs *fakeMsgSender) Send(msg *api.SecurityEventMessage, _ func(*api.SecurityEventMessage)) {
fs.Lock()
defer fs.Unlock()
@@ -878,3 +888,10 @@ func jsonPathValidation(testMod *testModule, data []byte, fnc func(testMod *test
fnc(testMod, obj)
}
+
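+// testModuleFxDeps gathers the fx-provided components needed to build a test module.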
+type testModuleFxDeps struct {
+ fx.In
+
+ Telemetry telemetry.Component
+ WMeta workloadmeta.Component
+}
diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go
index 13b8b1b608f59..da0946b338ce6 100644
--- a/pkg/security/tests/module_tester_linux.go
+++ b/pkg/security/tests/module_tester_linux.go
@@ -35,8 +35,6 @@ import (
"golang.org/x/sys/unix"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/telemetry"
- "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry"
@@ -107,6 +105,10 @@ runtime_security_config:
enabled: {{ .RuntimeSecurityEnabled }}
internal_monitoring:
enabled: true
+{{ if gt .EventServerRetention 0 }}
+ event_server:
+ retention: {{ .EventServerRetention }}
+{{ end }}
remote_configuration:
enabled: false
on_demand:
@@ -118,11 +120,6 @@ runtime_security_config:
enabled: {{ .SBOMEnabled }}
host:
enabled: {{ .HostSBOMEnabled }}
- enforcement:
- exclude_binaries:
- - {{ .EnforcementExcludeBinary }}
- rule_source_allowed:
- - file
activity_dump:
enabled: {{ .EnableActivityDump }}
syscall_monitor:
@@ -195,6 +192,20 @@ runtime_security_config:
enabled: {{.EBPFLessEnabled}}
hash_resolver:
enabled: true
+ enforcement:
+ exclude_binaries:
+ - {{ .EnforcementExcludeBinary }}
+ rule_source_allowed:
+ - file
+ disarmer:
+ container:
+ enabled: {{.EnforcementDisarmerContainerEnabled}}
+ max_allowed: {{.EnforcementDisarmerContainerMaxAllowed}}
+ period: {{.EnforcementDisarmerContainerPeriod}}
+ executable:
+ enabled: {{.EnforcementDisarmerExecutableEnabled}}
+ max_allowed: {{.EnforcementDisarmerExecutableMaxAllowed}}
+ period: {{.EnforcementDisarmerExecutablePeriod}}
`
const testPolicy = `---
@@ -795,12 +806,13 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman
} else {
emopts.ProbeOpts.TagsResolver = NewFakeResolverDifferentImageNames()
}
- telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule())
- wmeta := fxutil.Test[workloadmeta.Component](t,
+
+ fxDeps := fxutil.Test[testModuleFxDeps](
+ t,
core.MockBundle(),
wmmock.MockModule(workloadmeta.NewParams()),
)
- testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, wmeta, telemetry)
+ testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry)
if err != nil {
return nil, err
}
@@ -810,7 +822,7 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman
if !opts.staticOpts.disableRuntimeSecurity {
msgSender := newFakeMsgSender(testMod)
- cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, module.Opts{EventSender: testMod, MsgSender: msgSender})
+ cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod, MsgSender: msgSender})
if err != nil {
return nil, fmt.Errorf("failed to create module: %w", err)
}
diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go
index e494bf347a558..972437a8ca435 100644
--- a/pkg/security/tests/module_tester_windows.go
+++ b/pkg/security/tests/module_tester_windows.go
@@ -19,8 +19,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/telemetry"
- "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
"github.com/DataDog/datadog-agent/pkg/eventmonitor"
@@ -113,6 +111,10 @@ event_monitoring_config:
runtime_security_config:
enabled: {{ .RuntimeSecurityEnabled }}
+{{ if gt .EventServerRetention 0 }}
+ event_server:
+ retention: {{ .EventServerRetention }}
+{{ end }}
internal_monitoring:
enabled: true
remote_configuration:
@@ -179,6 +181,18 @@ runtime_security_config:
{{end}}
ebpfless:
enabled: {{.EBPFLessEnabled}}
+ enforcement:
+ exclude_binaries:
+ - {{ .EnforcementExcludeBinary }}
+ disarmer:
+ container:
+ enabled: {{.EnforcementDisarmerContainerEnabled}}
+ max_allowed: {{.EnforcementDisarmerContainerMaxAllowed}}
+ period: {{.EnforcementDisarmerContainerPeriod}}
+ executable:
+ enabled: {{.EnforcementDisarmerExecutableEnabled}}
+ max_allowed: {{.EnforcementDisarmerExecutableMaxAllowed}}
+ period: {{.EnforcementDisarmerExecutablePeriod}}
`
type onRuleHandler func(*model.Event, *rules.Rule)
@@ -265,12 +279,12 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs []
StatsdClient: statsdClient,
},
}
- telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule())
- wmeta := fxutil.Test[workloadmeta.Component](t,
+ fxDeps := fxutil.Test[testModuleFxDeps](
+ t,
core.MockBundle(),
wmmock.MockModule(workloadmeta.NewParams()),
)
- testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, wmeta, telemetry)
+ testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry)
if err != nil {
return nil, err
}
@@ -278,7 +292,7 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs []
var ruleSetloadedErr *multierror.Error
if !opts.staticOpts.disableRuntimeSecurity {
- cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, module.Opts{EventSender: testMod})
+ cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod})
if err != nil {
return nil, fmt.Errorf("failed to create module: %w", err)
}
diff --git a/pkg/security/tests/schemas/policy.schema.json b/pkg/security/tests/schemas/policy.schema.json
new file mode 100644
index 0000000000000..bb300a7571a98
--- /dev/null
+++ b/pkg/security/tests/schemas/policy.schema.json
@@ -0,0 +1,407 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules",
+ "$defs": {
+ "ActionDefinition": {
+ "oneOf": [
+ {
+ "required": [
+ "set"
+ ],
+ "title": "SetAction"
+ },
+ {
+ "required": [
+ "kill"
+ ],
+ "title": "KillAction"
+ },
+ {
+ "required": [
+ "coredump"
+ ],
+ "title": "CoreDumpAction"
+ },
+ {
+ "required": [
+ "hash"
+ ],
+ "title": "HashAction"
+ }
+ ],
+ "properties": {
+ "filter": {
+ "type": "string"
+ },
+ "set": {
+ "$ref": "#/$defs/SetDefinition"
+ },
+ "kill": {
+ "$ref": "#/$defs/KillDefinition"
+ },
+ "coredump": {
+ "$ref": "#/$defs/CoreDumpDefinition"
+ },
+ "hash": {
+ "$ref": "#/$defs/HashDefinition"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "description": "ActionDefinition describes a rule action section"
+ },
+ "CoreDumpDefinition": {
+ "anyOf": [
+ {
+ "required": [
+ "process"
+ ],
+ "title": "CoreDumpWithProcess"
+ },
+ {
+ "required": [
+ "mount"
+ ],
+ "title": "CoreDumpWithMount"
+ },
+ {
+ "required": [
+ "dentry"
+ ],
+ "title": "CoreDumpWithDentry"
+ }
+ ],
+ "properties": {
+ "process": {
+ "type": "boolean"
+ },
+ "mount": {
+ "type": "boolean"
+ },
+ "dentry": {
+ "type": "boolean"
+ },
+ "no_compression": {
+ "type": "boolean"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "description": "CoreDumpDefinition describes the 'coredump' action"
+ },
+ "HashDefinition": {
+ "properties": {},
+ "additionalProperties": false,
+ "type": "object",
+ "description": "HashDefinition describes the 'hash' section of a rule action"
+ },
+ "HookPointArg": {
+ "properties": {
+ "n": {
+ "type": "integer",
+ "description": "Zero-based argument index"
+ },
+ "kind": {
+ "type": "string",
+ "enum": [
+ "uint",
+ "null-terminated-string"
+ ]
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "n",
+ "kind"
+ ],
+ "description": "HookPointArg represents the definition of a hook point argument"
+ },
+ "KillDefinition": {
+ "properties": {
+ "signal": {
+ "type": "string",
+ "description": "A valid signal name",
+ "examples": [
+ "SIGKILL",
+ "SIGTERM"
+ ]
+ },
+ "scope": {
+ "type": "string",
+ "enum": [
+ "process",
+ "container"
+ ]
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "signal"
+ ],
+ "description": "KillDefinition describes the 'kill' section of a rule action"
+ },
+ "MacroDefinition": {
+ "oneOf": [
+ {
+ "required": [
+ "expression"
+ ],
+ "title": "MacroWithExpression"
+ },
+ {
+ "required": [
+ "values"
+ ],
+ "title": "MacroWithValues"
+ }
+ ],
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "expression": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "agent_version": {
+ "type": "string"
+ },
+ "filters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "values": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "combine": {
+ "type": "string",
+ "enum": [
+ "merge",
+ "override"
+ ]
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "id"
+ ],
+ "description": "MacroDefinition holds the definition of a macro"
+ },
+ "OnDemandHookPoint": {
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "syscall": {
+ "type": "boolean"
+ },
+ "args": {
+ "items": {
+ "$ref": "#/$defs/HookPointArg"
+ },
+ "type": "array"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "description": "OnDemandHookPoint represents a hook point definition"
+ },
+ "OverrideOptions": {
+ "properties": {
+ "fields": {
+ "items": {
+ "type": "string",
+ "enum": [
+ "all",
+ "expression",
+ "actions",
+ "every",
+ "tags"
+ ]
+ },
+ "type": "array"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "fields"
+ ],
+ "description": "OverrideOptions defines combine options"
+ },
+ "RuleDefinition": {
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ },
+ "expression": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "tags": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "type": "object"
+ },
+ "agent_version": {
+ "type": "string"
+ },
+ "filters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "disabled": {
+ "type": "boolean"
+ },
+ "combine": {
+ "type": "string",
+ "enum": [
+ "override"
+ ]
+ },
+ "override_options": {
+ "$ref": "#/$defs/OverrideOptions"
+ },
+ "actions": {
+ "items": {
+ "$ref": "#/$defs/ActionDefinition"
+ },
+ "type": "array"
+ },
+ "every": {
+ "oneOf": [
+ {
+ "type": "string",
+ "format": "duration",
+ "description": "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)"
+ },
+ {
+ "type": "integer",
+ "description": "Duration in nanoseconds"
+ }
+ ]
+ },
+ "silent": {
+ "type": "boolean"
+ },
+ "group_id": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "id"
+ ],
+ "description": "RuleDefinition holds the definition of a rule"
+ },
+ "SetDefinition": {
+ "oneOf": [
+ {
+ "required": [
+ "value"
+ ],
+ "title": "SetWithValue"
+ },
+ {
+ "required": [
+ "field"
+ ],
+ "title": "SetWithField"
+ }
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": true,
+ "field": {
+ "type": "string"
+ },
+ "append": {
+ "type": "boolean"
+ },
+ "scope": {
+ "type": "string",
+ "enum": [
+ "process",
+ "container"
+ ]
+ },
+ "size": {
+ "type": "integer"
+ },
+ "ttl": {
+ "oneOf": [
+ {
+ "type": "string",
+ "format": "duration",
+ "description": "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)"
+ },
+ {
+ "type": "integer",
+ "description": "Duration in nanoseconds"
+ }
+ ]
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "description": "SetDefinition describes the 'set' section of a rule action"
+ }
+ },
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+ "macros": {
+ "items": {
+ "$ref": "#/$defs/MacroDefinition"
+ },
+ "type": "array"
+ },
+ "rules": {
+ "items": {
+ "$ref": "#/$defs/RuleDefinition"
+ },
+ "type": "array"
+ },
+ "hooks": {
+ "items": {
+ "$ref": "#/$defs/OnDemandHookPoint"
+ },
+ "type": "array"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "required": [
+ "version",
+ "rules"
+ ],
+ "description": "PolicyDef represents a policy file definition"
+}
\ No newline at end of file
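The schema above makes `version` and `rules` mandatory at the top level, and every rule must carry at least an `id`. A minimal validation sketch against this schema, assuming the third-party validator github.com/santhosh-tekuri/jsonschema/v5 (not used by this patch; import path and API are assumptions):

```go
package main

import (
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5" // assumed validator, not part of this patch
)

func main() {
	schema, err := jsonschema.Compile("pkg/security/tests/schemas/policy.schema.json")
	if err != nil {
		panic(err)
	}
	// Smallest document the schema accepts: "version" and "rules" are required,
	// and each rule needs an "id".
	policy := map[string]interface{}{
		"version": "1.0",
		"rules": []interface{}{
			map[string]interface{}{"id": "my_rule", "expression": `exec.file.name == "bash"`},
		},
	}
	if err := schema.Validate(policy); err != nil {
		fmt.Println("policy rejected:", err)
		return
	}
	fmt.Println("policy accepted")
}
```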
diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go
index 1ea52a0344b79..43ef3dc1bbebf 100644
--- a/pkg/security/tests/testopts.go
+++ b/pkg/security/tests/testopts.go
@@ -64,6 +64,13 @@ type testOpts struct {
ebpfLessEnabled bool
dontWaitEBPFLessClient bool
enforcementExcludeBinary string
+ enforcementDisarmerContainerEnabled bool
+ enforcementDisarmerContainerMaxAllowed int
+ enforcementDisarmerContainerPeriod time.Duration
+ enforcementDisarmerExecutableEnabled bool
+ enforcementDisarmerExecutableMaxAllowed int
+ enforcementDisarmerExecutablePeriod time.Duration
+ eventServerRetention time.Duration
}
type dynamicTestOpts struct {
@@ -139,5 +146,12 @@ func (to testOpts) Equal(opts testOpts) bool {
to.networkIngressEnabled == opts.networkIngressEnabled &&
to.disableOnDemandRateLimiter == opts.disableOnDemandRateLimiter &&
to.ebpfLessEnabled == opts.ebpfLessEnabled &&
- to.enforcementExcludeBinary == opts.enforcementExcludeBinary
+ to.enforcementExcludeBinary == opts.enforcementExcludeBinary &&
+ to.enforcementDisarmerContainerEnabled == opts.enforcementDisarmerContainerEnabled &&
+ to.enforcementDisarmerContainerMaxAllowed == opts.enforcementDisarmerContainerMaxAllowed &&
+ to.enforcementDisarmerContainerPeriod == opts.enforcementDisarmerContainerPeriod &&
+ to.enforcementDisarmerExecutableEnabled == opts.enforcementDisarmerExecutableEnabled &&
+ to.enforcementDisarmerExecutableMaxAllowed == opts.enforcementDisarmerExecutableMaxAllowed &&
+ to.enforcementDisarmerExecutablePeriod == opts.enforcementDisarmerExecutablePeriod &&
+ to.eventServerRetention == opts.eventServerRetention
}
diff --git a/pkg/security/utils/proc_linux.go b/pkg/security/utils/proc_linux.go
index cfb0894125b81..6746091231e65 100644
--- a/pkg/security/utils/proc_linux.go
+++ b/pkg/security/utils/proc_linux.go
@@ -12,6 +12,7 @@ import (
"io"
"os"
"path"
+ "path/filepath"
"regexp"
"strconv"
"strings"
@@ -122,17 +123,20 @@ func ProcRootFilePath(pid uint32, file string) string {
return procPidPath2(pid, "root", file)
}
+// We do not use `HostProc` here because it performs a double call to
+// `filepath.Join`, and these helpers can be called in a tight loop.
+
func procPidPath(pid uint32, path string) string {
- return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path)
+ return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path)
}
func procPidPath2(pid uint32, path1 string, path2 string) string {
- return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path1, path2)
+ return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path1, path2)
}
// ModulesPath returns the path to the modules file in /proc
func ModulesPath() string {
- return kernel.HostProc("modules")
+ return filepath.Join(kernel.ProcFSRoot(), "modules")
}
// GetLoginUID returns the login uid of the provided process
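Since `HostProc` (per the comment above) layers one `filepath.Join` over another, the rewritten helpers collapse the work into a single join. A rough equivalence sketch, assuming `HostProc(parts...)` behaves like `filepath.Join(kernel.ProcFSRoot(), filepath.Join(parts...))` and that this file's imports are in scope:

```go
// Sketch: both forms yield the same path; the second avoids the nested join.
func procRootPathSketch(pid uint32, file string) string {
	pidStr := strconv.FormatUint(uint64(pid), 10)
	doubleJoin := filepath.Join(kernel.ProcFSRoot(), filepath.Join(pidStr, "root", file))
	singleJoin := filepath.Join(kernel.ProcFSRoot(), pidStr, "root", file)
	if doubleJoin != singleJoin { // never true; shown only to state the equivalence
		panic("unreachable")
	}
	return singleJoin
}
```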
diff --git a/pkg/security/utils/stat_unix.go b/pkg/security/utils/stat_unix.go
new file mode 100644
index 0000000000000..4c12a3b6c8178
--- /dev/null
+++ b/pkg/security/utils/stat_unix.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build unix
+
+// Package utils holds utils related files
+package utils
+
+import (
+ "io/fs"
+ "syscall"
+)
+
+// UnixStat is a unix-only, alloc-free equivalent of os.Stat that returns the
+// platform-specific syscall.Stat_t structure directly.
+func UnixStat(path string) (syscall.Stat_t, error) {
+ var stat syscall.Stat_t
+ var err error
+ for {
+ err = syscall.Stat(path, &stat) // assign, not :=, so the returned err is set
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ return stat, err
+}
+
+// UnixStatModeToGoFileMode converts a Unix mode to a Go fs.FileMode.
+func UnixStatModeToGoFileMode(mode uint32) fs.FileMode {
+ fsmode := fs.FileMode(mode & 0777)
+ switch mode & syscall.S_IFMT {
+ case syscall.S_IFBLK:
+ fsmode |= fs.ModeDevice
+ case syscall.S_IFCHR:
+ fsmode |= fs.ModeDevice | fs.ModeCharDevice
+ case syscall.S_IFDIR:
+ fsmode |= fs.ModeDir
+ case syscall.S_IFIFO:
+ fsmode |= fs.ModeNamedPipe
+ case syscall.S_IFLNK:
+ fsmode |= fs.ModeSymlink
+ case syscall.S_IFREG:
+ // nothing to do
+ case syscall.S_IFSOCK:
+ fsmode |= fs.ModeSocket
+ }
+ if mode&syscall.S_ISGID != 0 {
+ fsmode |= fs.ModeSetgid
+ }
+ if mode&syscall.S_ISUID != 0 {
+ fsmode |= fs.ModeSetuid
+ }
+ if mode&syscall.S_ISVTX != 0 {
+ fsmode |= fs.ModeSticky
+ }
+ return fsmode
+}
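A short usage sketch of the two helpers above; the target path and error handling are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/security/utils"
)

func main() {
	// EINTR is retried inside UnixStat, so a plain error check suffices here.
	st, err := utils.UnixStat("/etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	// Convert the raw mode bits into an fs.FileMode carrying type and permissions.
	mode := utils.UnixStatModeToGoFileMode(uint32(st.Mode))
	fmt.Printf("mode=%s size=%d\n", mode, st.Size)
}
```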
diff --git a/pkg/serverless/metrics/enhanced_metrics.go b/pkg/serverless/metrics/enhanced_metrics.go
index c7dd8e62bcd22..3d81c71fce4a2 100644
--- a/pkg/serverless/metrics/enhanced_metrics.go
+++ b/pkg/serverless/metrics/enhanced_metrics.go
@@ -61,6 +61,8 @@ const (
totalNetworkMetric = "aws.lambda.enhanced.total_network"
tmpUsedMetric = "aws.lambda.enhanced.tmp_used"
tmpMaxMetric = "aws.lambda.enhanced.tmp_max"
+ fdMaxMetric = "aws.lambda.enhanced.fd_max"
+ fdUseMetric = "aws.lambda.enhanced.fd_use"
enhancedMetricsEnvVar = "DD_ENHANCED_METRICS"
// Bottlecap
@@ -564,6 +566,80 @@ func SendTmpEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *S
}
+type generateFdEnhancedMetricsArgs struct {
+ FdMax float64
+ FdUse float64
+ Tags []string
+ Demux aggregator.Demultiplexer
+ Time float64
+}
+
+// generateFdEnhancedMetrics generates enhanced metrics for the maximum number of file descriptors available and in use
+func generateFdEnhancedMetrics(args generateFdEnhancedMetricsArgs) {
+ args.Demux.AggregateSample(metrics.MetricSample{
+ Name: fdMaxMetric,
+ Value: args.FdMax,
+ Mtype: metrics.DistributionType,
+ Tags: args.Tags,
+ SampleRate: 1,
+ Timestamp: args.Time,
+ })
+ args.Demux.AggregateSample(metrics.MetricSample{
+ Name: fdUseMetric,
+ Value: args.FdUse,
+ Mtype: metrics.DistributionType,
+ Tags: args.Tags,
+ SampleRate: 1,
+ Timestamp: args.Time,
+ })
+}
+
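+// SendFdEnhancedMetrics samples file descriptor usage every tick and, once the
+// sendMetrics channel is closed, emits the fd_max and peak fd_use metrics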
+func SendFdEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *ServerlessMetricAgent) {
+ if enhancedMetricsDisabled {
+ return
+ }
+
+ fdMaxData, err := proc.GetFileDescriptorMaxData()
+ if err != nil {
+ log.Debug("Could not emit file descriptor enhanced metrics. %v", err)
+ return
+ }
+
+ fdUseData, err := proc.GetFileDescriptorUseData()
+ if err != nil {
+ log.Debugf("Could not emit file descriptor enhanced metrics. %v", err)
+ return
+ }
+
+ fdMax := fdMaxData.MaximumFileHandles
+ fdUse := fdUseData.UseFileHandles
+
+ ticker := time.NewTicker(1 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ select {
+ case _, open := <-sendMetrics:
+ if !open {
+ generateFdEnhancedMetrics(generateFdEnhancedMetricsArgs{
+ FdMax: fdMax,
+ FdUse: fdUse,
+ Tags: tags,
+ Demux: metricAgent.Demux,
+ Time: float64(time.Now().UnixNano()) / float64(time.Second),
+ })
+ return
+ }
+ case <-ticker.C:
+ fdUseData, err := proc.GetFileDescriptorUseData()
+ if err != nil {
+ log.Debugf("Could not emit file descriptor enhanced metrics. %v", err)
+ return
+ }
+ fdUse = math.Max(fdUse, fdUseData.UseFileHandles)
+ }
+ }
+}
+
// incrementEnhancedMetric sends an enhanced metric with a value of 1 to the metrics channel
func incrementEnhancedMetric(name string, tags []string, timestamp float64, demux aggregator.Demultiplexer, force bool) {
// TODO - pass config here, instead of directly looking up var
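Operationally, `SendFdEnhancedMetrics` treats its channel purely as a stop signal: the goroutine re-samples fd usage every millisecond and keeps the peak, and closing the channel emits one final pair of distribution samples. A minimal sketch of the caller side (the real wiring appears in pkg/serverless/serverless.go later in this patch):

```go
stop := make(chan bool)
go metrics.SendFdEnhancedMetrics(stop, daemon.ExtraTags.Tags, daemon.MetricAgent)
// ... the invocation runs ...
close(stop) // flushes fd_max plus the peak fd_use observed during the invocation
```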
diff --git a/pkg/serverless/metrics/enhanced_metrics_test.go b/pkg/serverless/metrics/enhanced_metrics_test.go
index 6281b1326180f..1c274229ba145 100644
--- a/pkg/serverless/metrics/enhanced_metrics_test.go
+++ b/pkg/serverless/metrics/enhanced_metrics_test.go
@@ -730,6 +730,64 @@ func TestSendTmpEnhancedMetricsDisabled(t *testing.T) {
enhancedMetricsDisabled = false
}
+func TestSendFdEnhancedMetrics(t *testing.T) {
+ demux := createDemultiplexer(t)
+ tags := []string{"functionname:test-function"}
+ now := float64(time.Now().UnixNano()) / float64(time.Second)
+ args := generateFdEnhancedMetricsArgs{
+ FdMax: 1024,
+ FdUse: 26,
+ Tags: tags,
+ Demux: demux,
+ Time: now,
+ }
+ go generateFdEnhancedMetrics(args)
+ generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(3, 0, 100*time.Millisecond)
+ assert.Equal(t, []metrics.MetricSample{
+ {
+ Name: fdMaxMetric,
+ Value: 1024,
+ Mtype: metrics.DistributionType,
+ Tags: tags,
+ SampleRate: 1,
+ Timestamp: now,
+ },
+ {
+ Name: fdUseMetric,
+ Value: 26,
+ Mtype: metrics.DistributionType,
+ Tags: tags,
+ SampleRate: 1,
+ Timestamp: now,
+ },
+ },
+ generatedMetrics,
+ )
+ assert.Len(t, timedMetrics, 0)
+}
+
+func TestSendFdEnhancedMetricsDisabled(t *testing.T) {
+ var wg sync.WaitGroup
+ enhancedMetricsDisabled = true
+ demux := createDemultiplexer(t)
+ metricAgent := ServerlessMetricAgent{Demux: demux}
+ tags := []string{"functionname:test-function"}
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ SendFdEnhancedMetrics(make(chan bool), tags, &metricAgent)
+ }()
+
+ generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(1, 0, 100*time.Millisecond)
+
+ assert.Len(t, generatedMetrics, 0)
+ assert.Len(t, timedMetrics, 0)
+
+ wg.Wait()
+ enhancedMetricsDisabled = false
+}
+
func TestSendFailoverReasonMetric(t *testing.T) {
demux := createDemultiplexer(t)
tags := []string{"reason:test-reason"}
diff --git a/pkg/serverless/proc/proc.go b/pkg/serverless/proc/proc.go
index 76b2af63f3337..b8e612de91938 100644
--- a/pkg/serverless/proc/proc.go
+++ b/pkg/serverless/proc/proc.go
@@ -7,10 +7,12 @@
package proc
import (
+ "bufio"
"bytes"
"errors"
"fmt"
"io"
+ "math"
"os"
"strconv"
"strings"
@@ -22,6 +24,9 @@ const (
ProcStatPath = "/proc/stat"
ProcUptimePath = "/proc/uptime"
ProcNetDevPath = "/proc/net/dev"
+ ProcPath = "/proc"
+ PidLimitsPathFormat = "/%d/limits"
+ PidFdPathFormat = "/%d/fd"
lambdaNetworkInterface = "vinternal_1"
)
@@ -196,3 +201,83 @@ func getNetworkData(path string) (*NetworkData, error) {
}
}
+
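+// FileDescriptorMaxData contains the maximum number of file handles the function can use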
+type FileDescriptorMaxData struct {
+ MaximumFileHandles float64
+}
+
+// GetFileDescriptorMaxData returns the maximum limit of file descriptors the function can use
+func GetFileDescriptorMaxData() (*FileDescriptorMaxData, error) {
+ return getFileDescriptorMaxData(ProcPath)
+}
+
+func getFileDescriptorMaxData(path string) (*FileDescriptorMaxData, error) {
+ pids := getPidList(path)
+ fdMax := math.Inf(1)
+
+ for _, pid := range pids {
+ limitsPath := path + fmt.Sprintf(PidLimitsPathFormat, pid)
+ file, err := os.Open(limitsPath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "Max open files") {
+ fields := strings.Fields(line)
+ if len(fields) < 6 {
+ log.Debugf("file descriptor max data not found in file '%s'", limitsPath)
+ break
+ }
+
+ fdMaxPidStr := fields[3]
+ fdMaxPid, err := strconv.Atoi(fdMaxPidStr)
+ if err != nil {
+ log.Debugf("file descriptor max data not found in file '%s'", limitsPath)
+ break
+ }
+
+ fdMax = math.Min(fdMax, float64(fdMaxPid))
+ break
+ }
+ }
+ }
+
+ if !math.IsInf(fdMax, 1) {
+ return &FileDescriptorMaxData{
+ MaximumFileHandles: fdMax,
+ }, nil
+ }
+
+ return nil, fmt.Errorf("file descriptor max data not found")
+}
+
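+// FileDescriptorUseData contains the number of file handles the function is using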
+type FileDescriptorUseData struct {
+ UseFileHandles float64
+}
+
+// GetFileDescriptorUseData returns the number of file descriptors the function is currently using
+func GetFileDescriptorUseData() (*FileDescriptorUseData, error) {
+ return getFileDescriptorUseData(ProcPath)
+}
+
+func getFileDescriptorUseData(path string) (*FileDescriptorUseData, error) {
+ pids := getPidList(path)
+ fdUse := 0
+
+ for _, pid := range pids {
+ fdPath := path + fmt.Sprintf(PidFdPathFormat, pid)
+ files, err := os.ReadDir(fdPath)
+ if err != nil {
+ return nil, fmt.Errorf("file descriptor use data not found in file '%s'", fdPath)
+ }
+ fdUse += len(files)
+ }
+
+ return &FileDescriptorUseData{
+ UseFileHandles: float64(fdUse),
+ }, nil
+}
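In `/proc/<pid>/limits` the "Max open files" row carries the limit name plus soft limit, hard limit, and units columns, so after `strings.Fields` the soft limit sits at index 3; the `len(fields) < 6` guard above rejects truncated rows like the ones in the malformed test data. A standalone sketch of that indexing, using a row from the valid fixture:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "Max open files            1024                 1024                 files"
	fields := strings.Fields(line)
	// fields == [Max open files 1024 1024 files]; index 3 is the soft limit.
	soft, err := strconv.Atoi(fields[3])
	fmt.Println(soft, err) // 1024 <nil>
}
```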
diff --git a/pkg/serverless/proc/proc_test.go b/pkg/serverless/proc/proc_test.go
index 1f08cd1e0f9f5..6b445db35596c 100644
--- a/pkg/serverless/proc/proc_test.go
+++ b/pkg/serverless/proc/proc_test.go
@@ -143,3 +143,32 @@ func TestGetNetworkData(t *testing.T) {
assert.NotNil(t, err)
assert.Nil(t, networkData)
}
+
+func TestGetFileDescriptorMaxData(t *testing.T) {
+ path := "./testData/file-descriptor/valid"
+ fileDescriptorMaxData, err := getFileDescriptorMaxData(path)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(1024), fileDescriptorMaxData.MaximumFileHandles)
+
+ path = "./testData/file-descriptor/invalid_malformed"
+ fileDescriptorMaxData, err = getFileDescriptorMaxData(path)
+ assert.NotNil(t, err)
+ assert.Nil(t, fileDescriptorMaxData)
+
+ path = "./testData/file-descriptor/invalid_missing"
+ fileDescriptorMaxData, err = getFileDescriptorMaxData(path)
+ assert.NotNil(t, err)
+ assert.Nil(t, fileDescriptorMaxData)
+}
+
+func TestGetFileDescriptorUseData(t *testing.T) {
+ path := "./testData/file-descriptor/valid"
+ fileDescriptorUseData, err := getFileDescriptorUseData(path)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(5), fileDescriptorUseData.UseFileHandles)
+
+ path = "./testData/file-descriptor/invalid_missing"
+ fileDescriptorUseData, err = getFileDescriptorUseData(path)
+ assert.NotNil(t, err)
+ assert.Nil(t, fileDescriptorUseData)
+}
diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits
new file mode 100644
index 0000000000000..45c06574a388c
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max open files 1024 1024
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits
new file mode 100644
index 0000000000000..3ad780c33f48d
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max open files 1024
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits
new file mode 100644
index 0000000000000..34925a8f557f9
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits
@@ -0,0 +1,16 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits
new file mode 100644
index 0000000000000..17e615740c934
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits
@@ -0,0 +1,16 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 b/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 b/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/limits b/pkg/serverless/proc/testData/file-descriptor/valid/31/limits
new file mode 100644
index 0000000000000..664f04c884fad
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/valid/31/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max open files 1024 1024 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/limits b/pkg/serverless/proc/testData/file-descriptor/valid/9/limits
new file mode 100644
index 0000000000000..664f04c884fad
--- /dev/null
+++ b/pkg/serverless/proc/testData/file-descriptor/valid/9/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size unlimited unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 1024 1024 processes
+Max open files 1024 1024 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 4622 4622 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/pkg/serverless/serverless.go b/pkg/serverless/serverless.go
index 992ca51d55bda..2528e71d81e7a 100644
--- a/pkg/serverless/serverless.go
+++ b/pkg/serverless/serverless.go
@@ -161,6 +161,8 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64,
cpuOffsetData, cpuOffsetErr := proc.GetCPUData()
uptimeOffset, uptimeOffsetErr := proc.GetUptime()
networkOffsetData, networkOffsetErr := proc.GetNetworkData()
+ sendFdMetrics := make(chan bool)
+ go metrics.SendFdEnhancedMetrics(sendFdMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent)
sendTmpMetrics := make(chan bool)
go metrics.SendTmpEnhancedMetrics(sendTmpMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent)
@@ -179,16 +181,17 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64,
case <-doneChannel:
break
}
- sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics)
+ sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics, sendFdMetrics)
}
-func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool) {
+func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool, sendFdMetrics chan bool) {
if daemon.MetricAgent == nil {
log.Debug("Could not send system enhanced metrics")
return
}
close(sendTmpMetrics)
+ close(sendFdMetrics)
if emitCPUMetrics {
metrics.SendCPUEnhancedMetrics(cpuOffsetData, uptimeOffset, daemon.ExtraTags.Tags, daemon.MetricAgent.Demux)
diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go
index 816a14eef938d..7062efc03e307 100644
--- a/pkg/trace/api/api.go
+++ b/pkg/trace/api/api.go
@@ -279,17 +279,18 @@ func (r *HTTPReceiver) Start() {
if _, err := os.Stat(filepath.Dir(path)); !os.IsNotExist(err) {
ln, err := r.listenUnix(path)
if err != nil {
+ log.Errorf("Error creating UDS listener: %v", err)
r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err)
- killProcess("Error creating UDS listener: %v", err)
+ } else {
+ go func() {
+ defer watchdog.LogOnPanic(r.statsd)
+ if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed {
+ log.Errorf("Could not start UDS server: %v. UDS receiver disabled.", err)
+ r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err)
+ }
+ }()
+ log.Infof("Listening for traces at unix://%s", path)
}
- go func() {
- defer watchdog.LogOnPanic(r.statsd)
- if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed {
- log.Errorf("Could not start UDS server: %v. UDS receiver disabled.", err)
- r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err)
- }
- }()
- log.Infof("Listening for traces at unix://%s", path)
} else {
log.Errorf("Could not start UDS listener: socket directory does not exist: %s", path)
}
diff --git a/pkg/trace/api/api_nix_test.go b/pkg/trace/api/api_nix_test.go
index 8c871ad875f9d..fbefae776493e 100644
--- a/pkg/trace/api/api_nix_test.go
+++ b/pkg/trace/api/api_nix_test.go
@@ -13,6 +13,7 @@ import (
"fmt"
"net"
"net/http"
+ "os"
"path/filepath"
"testing"
"time"
@@ -83,6 +84,21 @@ func TestUDS(t *testing.T) {
t.Fatalf("expected http.StatusOK, got response: %#v", resp)
}
})
+
+ t.Run("uds_permission_err", func(t *testing.T) {
+ dir := t.TempDir()
+ err := os.Chmod(dir, 0444) // read-only
+ assert.NoError(t, err)
+
+ conf := config.New()
+ conf.Endpoints[0].APIKey = "apikey_2"
+ conf.ReceiverSocket = filepath.Join(dir, "apm.socket")
+
+ r := newTestReceiverFromConfig(conf)
+ // should not crash
+ r.Start()
+ r.Stop()
+ })
}
func TestHTTPReceiverStart(t *testing.T) {
diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod
index 512debc4740e6..a16cdf5d8f6a9 100644
--- a/pkg/trace/go.mod
+++ b/pkg/trace/go.mod
@@ -53,7 +53,7 @@ require (
)
require (
- github.com/DataDog/go-sqllexer v0.0.13 // indirect
+ github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/zstd v1.5.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum
index 9ae6fe28770bc..0acbab963039c 100644
--- a/pkg/trace/go.sum
+++ b/pkg/trace/go.sum
@@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 h1:10TPqpTlIkmDPFWVIEZ4ZX3rWrCrx3rEoeoAooZr6LM=
diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod
index 7ba2f55d9b746..e3466f3cda4f7 100644
--- a/pkg/trace/stats/oteltest/go.mod
+++ b/pkg/trace/stats/oteltest/go.mod
@@ -26,7 +26,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect
- github.com/DataDog/go-sqllexer v0.0.13 // indirect
+ github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum
index dcf72c47c645c..a928af42bb225 100644
--- a/pkg/trace/stats/oteltest/go.sum
+++ b/pkg/trace/stats/oteltest/go.sum
@@ -1,7 +1,7 @@
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE=
diff --git a/pkg/trace/writer/trace.go b/pkg/trace/writer/trace.go
index 2a7b5ab6b9314..d2879dc7f0c2b 100644
--- a/pkg/trace/writer/trace.go
+++ b/pkg/trace/writer/trace.go
@@ -294,14 +294,14 @@ func (w *TraceWriter) serialize(pl *pb.AgentPayload) {
if err != nil {
// it will never happen, unless an invalid compression is chosen;
// we know gzip.BestSpeed is valid.
- log.Errorf("Failed to initialize gzip writer. No traces can be sent: %v", err)
+ log.Errorf("Failed to initialize %s writer. No traces can be sent: %v", w.compressor.Encoding(), err)
return
}
if _, err := writer.Write(b); err != nil {
- log.Errorf("Error gzipping trace payload: %v", err)
+ log.Errorf("Error %s trace payload: %v", w.compressor.Encoding(), err)
}
if err := writer.Close(); err != nil {
- log.Errorf("Error closing gzip stream when writing trace payload: %v", err)
+ log.Errorf("Error closing %s stream when writing trace payload: %v", w.compressor.Encoding(), err)
}
sendPayloads(w.senders, p, w.syncMode)
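The compressor's interface is not part of this hunk; assuming the `writer` above comes from something like `w.compressor.NewWriter`, a hypothetical shape consistent with the calls shown would be:

```go
// Hypothetical sketch only; the real definition lives elsewhere in pkg/trace.
type Compressor interface {
	// Encoding names the codec (e.g. "gzip" or "zstd") for headers and log lines.
	Encoding() string
	// NewWriter wraps w with a compressing writer for this codec.
	NewWriter(w io.Writer) (io.WriteCloser, error)
}
```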
diff --git a/pkg/util/cloudproviders/network.go b/pkg/util/cloudproviders/network.go
index 12c7496579c3f..c183ea96ce06f 100644
--- a/pkg/util/cloudproviders/network.go
+++ b/pkg/util/cloudproviders/network.go
@@ -30,7 +30,7 @@ func GetNetworkID(ctx context.Context) (string, error) {
return cache.Get[string](
networkIDCacheKey,
func() (string, error) {
- // the the id from configuration
+ // the id from configuration
if networkID := config.Datadog().GetString("network.id"); networkID != "" {
log.Debugf("GetNetworkID: using configured network ID: %s", networkID)
return networkID, nil
diff --git a/pkg/util/ec2/ec2_test.go b/pkg/util/ec2/ec2_test.go
index d92242dada46f..cbb660df50c8a 100644
--- a/pkg/util/ec2/ec2_test.go
+++ b/pkg/util/ec2/ec2_test.go
@@ -30,6 +30,8 @@ var (
initialTokenURL = tokenURL
)
+const testIMDSToken = "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw=="
+
func resetPackageVars() {
config.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout)
metadataURL = initialMetadataURL
@@ -301,12 +303,11 @@ func TestExtractClusterName(t *testing.T) {
func TestGetToken(t *testing.T) {
ctx := context.Background()
- originalToken := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw=="
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
h := r.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")
if h != "" && r.Method == http.MethodPut {
- io.WriteString(w, originalToken)
+ io.WriteString(w, testIMDSToken)
} else {
w.WriteHeader(http.StatusNotFound)
}
@@ -319,7 +320,7 @@ func TestGetToken(t *testing.T) {
token, err := token.Get(ctx)
require.NoError(t, err)
- assert.Equal(t, originalToken, token)
+ assert.Equal(t, testIMDSToken, token)
}
func TestMetedataRequestWithToken(t *testing.T) {
@@ -331,7 +332,6 @@ func TestMetedataRequestWithToken(t *testing.T) {
ctx := context.Background()
ipv4 := "198.51.100.1"
- tok := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw=="
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
@@ -345,11 +345,11 @@ func TestMetedataRequestWithToken(t *testing.T) {
r.Header.Add("X-sequence", fmt.Sprintf("%v", seq))
seq++
requestForToken = r
- io.WriteString(w, tok)
+ io.WriteString(w, testIMDSToken)
case http.MethodGet:
// Should be a metadata request
t := r.Header.Get("X-aws-ec2-metadata-token")
- if t != tok {
+ if t != testIMDSToken {
r.Header.Add("X-sequence", fmt.Sprintf("%v", seq))
seq++
requestWithoutToken = r
@@ -386,7 +386,7 @@ func TestMetedataRequestWithToken(t *testing.T) {
assert.Equal(t, fmt.Sprint(config.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds"))
assert.Equal(t, http.MethodPut, requestForToken.Method)
assert.Equal(t, "/", requestForToken.RequestURI)
- assert.Equal(t, tok, requestWithToken.Header.Get("X-aws-ec2-metadata-token"))
+ assert.Equal(t, testIMDSToken, requestWithToken.Header.Get("X-aws-ec2-metadata-token"))
assert.Equal(t, "/public-ipv4", requestWithToken.RequestURI)
assert.Equal(t, http.MethodGet, requestWithToken.Method)
@@ -515,7 +515,7 @@ func TestMetadataSourceIMDS(t *testing.T) {
w.Header().Set("Content-Type", "text/plain")
switch r.Method {
case http.MethodPut: // token request
- io.WriteString(w, "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==")
+ io.WriteString(w, testIMDSToken)
case http.MethodGet: // metadata request
switch r.RequestURI {
case "/hostname":
diff --git a/pkg/util/ec2/imds_helpers.go b/pkg/util/ec2/imds_helpers.go
index 510fad39f43c4..afc2ef22fffbd 100644
--- a/pkg/util/ec2/imds_helpers.go
+++ b/pkg/util/ec2/imds_helpers.go
@@ -77,7 +77,7 @@ func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, e
tokenValue, err := token.Get(ctx)
if err != nil {
if forceIMDSv2 {
- return "", fmt.Errorf("Could not fetch token from IMDSv2")
+ return "", fmt.Errorf("could not fetch token from IMDSv2")
}
log.Warnf("ec2_prefer_imdsv2 is set to true in the configuration but the agent was unable to proceed: %s", err)
} else {
diff --git a/pkg/util/ec2/network.go b/pkg/util/ec2/network.go
index 5fafa6bed62d7..a7fa4730513a7 100644
--- a/pkg/util/ec2/network.go
+++ b/pkg/util/ec2/network.go
@@ -30,9 +30,9 @@ func GetPublicIPv4(ctx context.Context) (string, error) {
var networkIDFetcher = cachedfetch.Fetcher{
Name: "VPC IDs",
Attempt: func(ctx context.Context) (interface{}, error) {
- resp, err := getMetadataItem(ctx, imdsNetworkMacs, false)
+ resp, err := getMetadataItem(ctx, imdsNetworkMacs, true)
if err != nil {
- return "", err
+ return "", fmt.Errorf("EC2: GetNetworkID failed to get mac addresses: %w", err)
}
macs := strings.Split(strings.TrimSpace(resp), "\n")
@@ -43,9 +43,9 @@ var networkIDFetcher = cachedfetch.Fetcher{
continue
}
mac = strings.TrimSuffix(mac, "/")
- id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), false)
+ id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), true)
if err != nil {
- return "", err
+ return "", fmt.Errorf("EC2: GetNetworkID failed to get vpc id for mac %s: %w", mac, err)
}
vpcIDs.Add(id)
}
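Because the new messages wrap the cause with `%w`, callers can still match the underlying failure; a small illustration of that property:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("IMDS timeout") // stand-in for the real transport error
	err := fmt.Errorf("EC2: GetNetworkID failed to get mac addresses: %w", base)
	fmt.Println(errors.Is(err, base)) // true: %w keeps the cause matchable
}
```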
diff --git a/pkg/util/ec2/network_test.go b/pkg/util/ec2/network_test.go
index 7fa773b41b888..1e4ca0bc36b42 100644
--- a/pkg/util/ec2/network_test.go
+++ b/pkg/util/ec2/network_test.go
@@ -23,11 +23,16 @@ func TestGetPublicIPv4(t *testing.T) {
ip := "10.0.0.2"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
- switch r.RequestURI {
- case "/public-ipv4":
- io.WriteString(w, ip)
- default:
- w.WriteHeader(http.StatusNotFound)
+ switch r.Method {
+ case http.MethodPut: // token request
+ io.WriteString(w, testIMDSToken)
+ case http.MethodGet: // metadata request
+ switch r.RequestURI {
+ case "/public-ipv4":
+ io.WriteString(w, ip)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
}
}))
@@ -47,18 +52,24 @@ func TestGetNetworkID(t *testing.T) {
vpc := "vpc-12345"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
- switch r.RequestURI {
- case "/network/interfaces/macs":
- io.WriteString(w, mac+"/")
- case "/network/interfaces/macs/00:00:00:00:00/vpc-id":
- io.WriteString(w, vpc)
- default:
- w.WriteHeader(http.StatusNotFound)
+ switch r.Method {
+ case http.MethodPut: // token request
+ io.WriteString(w, testIMDSToken)
+ case http.MethodGet: // metadata request
+ switch r.RequestURI {
+ case "/network/interfaces/macs":
+ io.WriteString(w, mac+"/")
+ case "/network/interfaces/macs/00:00:00:00:00/vpc-id":
+ io.WriteString(w, vpc)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
}
}))
defer ts.Close()
metadataURL = ts.URL
+ tokenURL = ts.URL
config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000)
defer resetPackageVars()
@@ -69,18 +80,25 @@ func TestGetNetworkID(t *testing.T) {
func TestGetInstanceIDNoMac(t *testing.T) {
ctx := context.Background()
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
- io.WriteString(w, "")
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ switch r.Method {
+ case http.MethodPut: // token request
+ io.WriteString(w, testIMDSToken)
+ case http.MethodGet: // metadata request
+ io.WriteString(w, "")
+ }
}))
defer ts.Close()
metadataURL = ts.URL
+ tokenURL = ts.URL
config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000)
defer resetPackageVars()
_, err := GetNetworkID(ctx)
require.Error(t, err)
- assert.Contains(t, err.Error(), "no mac addresses returned")
+ assert.Contains(t, err.Error(), "EC2: GetNetworkID no mac addresses returned")
}
func TestGetInstanceIDMultipleVPC(t *testing.T) {
@@ -91,21 +109,27 @@ func TestGetInstanceIDMultipleVPC(t *testing.T) {
vpc2 := "vpc-6789"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
- switch r.RequestURI {
- case "/network/interfaces/macs":
- io.WriteString(w, mac+"/\n")
- io.WriteString(w, mac2+"/\n")
- case "/network/interfaces/macs/00:00:00:00:00/vpc-id":
- io.WriteString(w, vpc)
- case "/network/interfaces/macs/00:00:00:00:01/vpc-id":
- io.WriteString(w, vpc2)
- default:
- w.WriteHeader(http.StatusNotFound)
+ switch r.Method {
+ case http.MethodPut: // token request
+ io.WriteString(w, testIMDSToken)
+ case http.MethodGet: // metadata request
+ switch r.RequestURI {
+ case "/network/interfaces/macs":
+ io.WriteString(w, mac+"/\n")
+ io.WriteString(w, mac2+"/\n")
+ case "/network/interfaces/macs/00:00:00:00:00/vpc-id":
+ io.WriteString(w, vpc)
+ case "/network/interfaces/macs/00:00:00:00:01/vpc-id":
+ io.WriteString(w, vpc2)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
}
}))
defer ts.Close()
metadataURL = ts.URL
+ tokenURL = ts.URL
config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000)
defer resetPackageVars()
diff --git a/pkg/util/fxutil/logging.go b/pkg/util/fxutil/logging.go
deleted file mode 100644
index 0b17166529ae4..0000000000000
--- a/pkg/util/fxutil/logging.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-package fxutil
-
-import (
- "os"
-
- "go.uber.org/fx"
- "go.uber.org/fx/fxevent"
-)
-
-// FxLoggingOption creates an fx.Option to configure the Fx logger, either to do nothing
-// (the default) or to log to the console (when TRACE_FX is set).
-func FxLoggingOption() fx.Option {
- return fx.WithLogger(
- func() fxevent.Logger {
- if os.Getenv("TRACE_FX") == "" {
- return fxevent.NopLogger
- }
- return &fxevent.ConsoleLogger{W: os.Stderr}
- },
- )
-}
diff --git a/pkg/util/fxutil/logging/logging.go b/pkg/util/fxutil/logging/logging.go
new file mode 100644
index 0000000000000..8cb34a694ae11
--- /dev/null
+++ b/pkg/util/fxutil/logging/logging.go
@@ -0,0 +1,61 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package logging provides a logger that logs fx events.
+package logging
+
+import (
+ "go.uber.org/fx"
+ "go.uber.org/fx/fxevent"
+)
+
+// Logger is a logger that logs fx events.
+type Logger interface {
+ Debug(v ...interface{})
+}
+
+type fxEventLogger struct {
+ logger func(v ...interface{})
+}
+
+// NewFxEventLoggerOption returns an fx option that provides an fxEventLogger.
+// A type parameter is used so that this package does not depend on the logger package.
+func NewFxEventLoggerOption[T Logger]() fx.Option {
+ // Note: The pointer in *T is needed for `optional:"true"`
+ return fx.Provide(func(logger *T) *fxEventLogger {
+ if logger == nil {
+ return nil
+ }
+ return &fxEventLogger{logger: (*logger).Debug}
+ })
+}
+
+// Write writes the given bytes to the logger.
+func (l *fxEventLogger) Write(p []byte) (n int, err error) {
+ l.logger(string(p))
+ return len(p), nil
+}
+
+type loggingParams struct {
+ fx.In
+
+ // Note to the reader: don't use `optional:"true"` unless you truly understand how it works.
+ // See https://github.com/uber-go/fx/issues/613. It should ideally be used only for logging and debugging purposes.
+ FxEventLogging *fxEventLogger `optional:"true"`
+}
+
+// FxLoggingOption returns an fx.Option that provides a logger that logs fx events.
+// If fxEventLogger is provided, it will be used; otherwise nothing is logged.
+// Typically, this logs fx events when log_level is debug or above.
+func FxLoggingOption() fx.Option {
+ return fx.WithLogger(
+ func(params loggingParams) fxevent.Logger {
+ if params.FxEventLogging != nil {
+ return &fxevent.ConsoleLogger{W: params.FxEventLogging}
+ }
+ return fxevent.NopLogger
+ },
+ )
+}
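With this design, fx events are logged only when both `FxLoggingOption()` is in the graph and something provides a logger via `NewFxEventLoggerOption`. A rough sketch with a stand-in logger type (illustrative only; the agent wires its real log component instead):

```go
package main

import (
	"fmt"

	"go.uber.org/fx"

	"github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
)

// debugLogger is an illustrative stand-in satisfying the Logger interface.
type debugLogger struct{}

func (debugLogger) Debug(v ...interface{}) { fmt.Println(v...) }

func main() {
	app := fx.New(
		logging.NewFxEventLoggerOption[debugLogger](),
		fx.Supply(&debugLogger{}), // *debugLogger matches the *T constructor parameter
		logging.FxLoggingOption(), // routes fx lifecycle events through Debug
	)
	app.Run()
}
```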
diff --git a/pkg/util/fxutil/oneshot.go b/pkg/util/fxutil/oneshot.go
index f970f13efd4d0..fa602836c050a 100644
--- a/pkg/util/fxutil/oneshot.go
+++ b/pkg/util/fxutil/oneshot.go
@@ -33,7 +33,7 @@ func OneShot(oneShotFunc interface{}, opts ...fx.Option) error {
opts = append(opts,
delayedCall.option(),
- FxAgentBase(),
+ FxAgentBase(true),
)
// Temporarily increase timeout for all fxutil.OneShot calls until we can better characterize our
// start time requirements. Prepend to opts so individual calls can override the timeout.
diff --git a/pkg/util/fxutil/provide_comp.go b/pkg/util/fxutil/provide_comp.go
index e483a4df90666..733db928b35d9 100644
--- a/pkg/util/fxutil/provide_comp.go
+++ b/pkg/util/fxutil/provide_comp.go
@@ -14,6 +14,7 @@ import (
"unicode/utf8"
compdef "github.com/DataDog/datadog-agent/comp/def"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging"
"go.uber.org/fx"
)
@@ -301,12 +302,13 @@ func coerceStructTo(input reflect.Value, outType reflect.Type, oldEmbed, newEmbe
}
// FxAgentBase returns all of our adapters from compdef types to fx types
-func FxAgentBase() fx.Option {
- return fx.Options(
- FxLoggingOption(),
- fx.Provide(newFxLifecycleAdapter),
- fx.Provide(newFxShutdownerAdapter),
- )
+func FxAgentBase(logFxEvents bool) fx.Option {
+	options := []fx.Option{
+		fx.Provide(newFxLifecycleAdapter),
+		fx.Provide(newFxShutdownerAdapter),
+	}
+ if logFxEvents {
+ options = append(options, logging.FxLoggingOption())
+ }
+ return fx.Options(options...)
}
// Lifecycle is a compdef interface compatible with fx.Lifecycle, to provide start/stop hooks
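The boolean keeps one code path for both flavors: agent entry points opt in to fx event logging, while test helpers opt out so test output stays quiet. A compact sketch of the two call sites, mirroring the changes to run.go, oneshot.go, and test.go in this patch:

```go
prod := fxutil.FxAgentBase(true)  // Run and OneShot: log fx events when a logger exists
test := fxutil.FxAgentBase(false) // fxutil.Test and friends: no fx event logging
_, _ = prod, test
```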
diff --git a/pkg/util/fxutil/run.go b/pkg/util/fxutil/run.go
index d094927ee7e22..9fb86b3021c92 100644
--- a/pkg/util/fxutil/run.go
+++ b/pkg/util/fxutil/run.go
@@ -21,7 +21,7 @@ func Run(opts ...fx.Option) error {
return fxAppTestOverride(func() {}, opts)
}
- opts = append(opts, FxAgentBase())
+ opts = append(opts, FxAgentBase(true))
// Temporarily increase timeout for all fxutil.Run calls until we can better characterize our
// start time requirements. Prepend to opts so individual calls can override the timeout.
opts = append(
diff --git a/pkg/util/fxutil/test.go b/pkg/util/fxutil/test.go
index 4ba94db3faeed..76bffc7711d39 100644
--- a/pkg/util/fxutil/test.go
+++ b/pkg/util/fxutil/test.go
@@ -35,7 +35,7 @@ func Test[T any](t testing.TB, opts ...fx.Option) T {
app := fxtest.New(
t,
- FxAgentBase(),
+ FxAgentBase(false),
fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
delayed.option(),
fx.Options(opts...),
@@ -64,7 +64,7 @@ func TestApp[T any](opts ...fx.Option) (*fx.App, T, error) {
})
app := fx.New(
- FxAgentBase(),
+ FxAgentBase(false),
delayed.option(),
fx.Options(opts...),
)
@@ -97,7 +97,7 @@ type appAssertFn func(testing.TB, *fx.App)
func TestStart(t testing.TB, opts fx.Option, appAssert appAssertFn, fn interface{}) {
delayed := newDelayedFxInvocation(fn)
app := fx.New(
- FxAgentBase(),
+ FxAgentBase(false),
fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
delayed.option(),
opts,
@@ -116,7 +116,7 @@ func TestRun(t *testing.T, f func() error) {
var fxFakeAppRan bool
fxAppTestOverride = func(_ interface{}, opts []fx.Option) error {
fxFakeAppRan = true
- opts = append(opts, FxAgentBase())
+ opts = append(opts, FxAgentBase(false))
require.NoError(t, fx.ValidateApp(opts...))
return nil
}
@@ -163,13 +163,13 @@ func TestOneShotSubcommand(
require.NoError(t,
fx.ValidateApp(
append(opts,
- FxAgentBase(),
+ FxAgentBase(false),
fx.Invoke(oneShotFunc))...))
// build an app without the oneShotFunc, and with verifyFn
app := fxtest.New(t,
append(opts,
- FxAgentBase(),
+ FxAgentBase(false),
fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
fx.Invoke(verifyFn))...)
defer app.RequireStart().RequireStop()
@@ -201,7 +201,7 @@ func TestOneShot(t *testing.T, fct func()) {
require.NoError(t,
fx.ValidateApp(
append(opts,
- FxAgentBase(),
+ FxAgentBase(false),
fx.Invoke(oneShotFunc))...))
return nil
}
@@ -234,7 +234,7 @@ func TestBundle(t *testing.T, bundle BundleOptions, extraOptions ...fx.Option) {
invoke,
bundle,
fx.Options(extraOptions...),
- FxAgentBase(),
+ FxAgentBase(false),
fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
))
}
diff --git a/pkg/util/kubernetes/helpers.go b/pkg/util/kubernetes/helpers.go
index 66a6ef5fbce07..142e62e562fea 100644
--- a/pkg/util/kubernetes/helpers.go
+++ b/pkg/util/kubernetes/helpers.go
@@ -20,23 +20,23 @@ const Digits = "1234567890"
// ParseDeploymentForReplicaSet gets the deployment name from a replicaset,
// or returns an empty string if no parent deployment is found.
func ParseDeploymentForReplicaSet(name string) string {
- lastDash := strings.LastIndexByte(name, '-')
- if lastDash == -1 {
- // No dash
- return ""
- }
- suffix := name[lastDash+1:]
- if len(suffix) < 3 {
- // Suffix is variable length but we cutoff at 3+ characters
- return ""
- }
+ return removeKubernetesNameSuffix(name)
+}
- if !stringInRuneset(suffix, Digits) && !stringInRuneset(suffix, KubeAllowedEncodeStringAlphaNums) {
- // Invalid suffix
+// ParseDeploymentForPodName gets the deployment name from a pod name,
+// or returns an empty string if no parent deployment is found.
+func ParseDeploymentForPodName(name string) string {
+ replicaSet := removeKubernetesNameSuffix(name)
+ if replicaSet == "" {
return ""
}
+ return ParseDeploymentForReplicaSet(replicaSet)
+}
- return name[:lastDash]
+// ParseReplicaSetForPodName gets the replica set name from a pod name,
+// or returns an empty string if no parent replica set is found.
+func ParseReplicaSetForPodName(name string) string {
+ return removeKubernetesNameSuffix(name)
}
// ParseCronJobForJob gets the cronjob name from a job,
@@ -79,3 +79,25 @@ func stringInRuneset(name, subset string) bool {
}
return true
}
+
+// removeKubernetesNameSuffix removes the suffix from a kubernetes name
+// or returns an empty string if either the suffix or name are invalid.
+func removeKubernetesNameSuffix(name string) string {
+ lastDash := strings.LastIndexByte(name, '-')
+ if lastDash == -1 {
+ // No dash
+ return ""
+ }
+ suffix := name[lastDash+1:]
+ if len(suffix) < 3 {
+ // Suffix is variable length but we cutoff at 3+ characters
+ return ""
+ }
+
+ if !stringInRuneset(suffix, Digits) && !stringInRuneset(suffix, KubeAllowedEncodeStringAlphaNums) {
+ // Invalid suffix
+ return ""
+ }
+
+ return name[:lastDash]
+}
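The suffix rules compose: a pod name sheds its random suffix to yield the replica set, and the replica set sheds the pod-template hash to yield the deployment. A worked example whose values match the test cases added below:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/kubernetes"
)

func main() {
	pod := "frontend-56c89cfff7-tsdww"
	fmt.Println(kubernetes.ParseReplicaSetForPodName(pod))                      // frontend-56c89cfff7
	fmt.Println(kubernetes.ParseDeploymentForPodName(pod))                      // frontend
	fmt.Println(kubernetes.ParseDeploymentForReplicaSet("frontend-56c89cfff7")) // frontend
}
```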
diff --git a/pkg/util/kubernetes/helpers_test.go b/pkg/util/kubernetes/helpers_test.go
index b3bc300c953fd..899ba603a2c0e 100644
--- a/pkg/util/kubernetes/helpers_test.go
+++ b/pkg/util/kubernetes/helpers_test.go
@@ -40,6 +40,62 @@ func TestParseDeploymentForReplicaSet(t *testing.T) {
}
}
+func TestParseDeploymentForPodName(t *testing.T) {
+ for in, out := range map[string]string{
+ // Nominal 1.6 cases
+ "frontend-2891696001-51234": "frontend",
+ "front-end-2891696001-72346": "front-end",
+
+ // Non-deployment 1.6 cases
+ "frontend2891696001-31-": "",
+ "-frontend2891696001-21": "",
+ "manually-created": "",
+
+ // 1.8+ nominal cases
+ "frontend-56c89cfff7-tsdww": "frontend",
+ "frontend-56c-p2q": "frontend",
+ "frontend-56c89cff-qhxl8": "frontend",
+ "frontend-56c89cfff7c2-g9lmb": "frontend",
+ "front-end-768dd754b7-ptdcc": "front-end",
+
+ // 1.8+ non-deployment cases
+ "frontend-56c89cff-bx": "", // too short
+ "frontend-56a89cfff7-a": "", // no vowels allowed
+ } {
+ t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) {
+ assert.Equal(t, out, ParseDeploymentForPodName(in))
+ })
+ }
+}
+
+func TestParseReplicaSetForPodName(t *testing.T) {
+ for in, out := range map[string]string{
+ // Nominal 1.6 cases
+ "frontend-2891696001-51234": "frontend-2891696001",
+ "front-end-2891696001-72346": "front-end-2891696001",
+
+ // Non-replica-set 1.6 cases
+ "frontend2891696001-31-": "",
+ "-frontend2891696001-21": "",
+ "manually-created": "",
+
+ // 1.8+ nominal cases
+ "frontend-56c89cfff7-tsdww": "frontend-56c89cfff7",
+ "frontend-56c-p2q": "frontend-56c",
+ "frontend-56c89cff-qhxl8": "frontend-56c89cff",
+ "frontend-56c89cfff7c2-g9lmb": "frontend-56c89cfff7c2",
+ "front-end-768dd754b7-ptdcc": "front-end-768dd754b7",
+
+ // 1.8+ non-replica-set cases
+ "frontend-56c89cff-bx": "", // too short
+ "frontend-56a89cfff7-a": "", // no vowels allowed
+ } {
+ t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) {
+ assert.Equal(t, out, ParseReplicaSetForPodName(in))
+ })
+ }
+}
+
func TestParseCronJobForJob(t *testing.T) {
for in, out := range map[string]struct {
string
diff --git a/pkg/util/tagger/go.mod b/pkg/util/tagger/go.mod
new file mode 100644
index 0000000000000..0d482a50ec691
--- /dev/null
+++ b/pkg/util/tagger/go.mod
@@ -0,0 +1,78 @@
+module github.com/DataDog/datadog-agent/pkg/util/tagger
+
+go 1.22.0
+
+replace (
+ github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def
+ github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder
+ github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types
+ github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets
+ github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry
+ github.com/DataDog/datadog-agent/comp/def => ../../../comp/def
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults
+ github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env
+ github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model
+ github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup
+ github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable
+ github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem
+ github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate
+ github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log
+ github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional
+ github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer
+ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber
+ github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system
+ github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket
+ github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil
+)
+
+require github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2
+
+require (
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect
+ github.com/DataDog/viper v1.13.5 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
+ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/magiconair/properties v1.8.1 // indirect
+ github.com/mitchellh/mapstructure v1.1.2 // indirect
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
+ github.com/pelletier/go-toml v1.2.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+ github.com/shoenig/go-m1cpu v0.1.6 // indirect
+ github.com/spf13/afero v1.1.2 // indirect
+ github.com/spf13/cast v1.5.1 // indirect
+ github.com/spf13/jwalterweatherman v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
+ golang.org/x/mod v0.20.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
+ golang.org/x/tools v0.24.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/pkg/util/tagger/go.sum b/pkg/util/tagger/go.sum
new file mode 100644
index 0000000000000..77ba213060c82
--- /dev/null
+++ b/pkg/util/tagger/go.sum
@@ -0,0 +1,352 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64=
+github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
+github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
+github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI=
+go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw=
+go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
+golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/pkg/util/tagger/tagger.go b/pkg/util/tagger/tagger.go
index 61615355e9747..b8ae45a2f97c5 100644
--- a/pkg/util/tagger/tagger.go
+++ b/pkg/util/tagger/tagger.go
@@ -6,11 +6,21 @@
// Package tagger provides function to check if the tagger should use composite entity id and object store
package tagger
-import "github.com/DataDog/datadog-agent/pkg/config"
+import (
+ "sync"
+
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+)
+
+var useCompositeStore bool
+var doOnce sync.Once
// ShouldUseCompositeStore indicates whether the tagger should use the default or composite implementation
// of entity ID and object store.
// TODO: remove this when we switch over fully to the composite implementation
func ShouldUseCompositeStore() bool {
- return config.Datadog().GetBool("tagger.tagstore_use_composite_entity_id")
+ doOnce.Do(func() {
+ useCompositeStore = pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id")
+ })
+ return useCompositeStore
}
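The rewritten ShouldUseCompositeStore reads the config key once and caches the result for the lifetime of the process, so repeated calls avoid hitting the config store and a runtime change to tagger.tagstore_use_composite_entity_id no longer flips the store implementation mid-run. A minimal sketch of the same read-once pattern, written in Python to match the task code later in the diff (the names and the read_flag callable are illustrative, not part of the Agent codebase):

```python
import threading

_lock = threading.Lock()
_cached = None  # unset until the first lookup


def should_use_composite_store(read_flag) -> bool:
    """Evaluate read_flag() once; all later calls return the cached value."""
    global _cached
    if _cached is None:
        with _lock:
            if _cached is None:  # double-checked so the lock stays cold after init
                _cached = bool(read_flag())
    return _cached
```

sync.Once in the Go version gives the same run-exactly-once guarantee without the explicit double-check.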
diff --git a/pkg/util/winutil/iphelper/routes.go b/pkg/util/winutil/iphelper/routes.go
index 2de67ec872cae..08ca4f956dfe0 100644
--- a/pkg/util/winutil/iphelper/routes.go
+++ b/pkg/util/winutil/iphelper/routes.go
@@ -198,20 +198,3 @@ func GetIFTable() (table map[uint32]windows.MibIfRow, err error) {
return table, nil
}
-
-// Ntohs converts a network byte order 16 bit int to host byte order
-func Ntohs(i uint16) uint16 {
- return binary.BigEndian.Uint16((*(*[2]byte)(unsafe.Pointer(&i)))[:])
-}
-
-// Ntohl converts a network byte order 32 bit int to host byte order
-func Ntohl(i uint32) uint32 {
- return binary.BigEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&i)))[:])
-}
-
-// Htonl converts a host byte order 32 bit int to network byte order
-func Htonl(i uint32) uint32 {
- b := make([]byte, 4)
- binary.BigEndian.PutUint32(b, i)
- return *(*uint32)(unsafe.Pointer(&b[0]))
-}
diff --git a/release.json b/release.json
index c6e75e4722f0d..34d4e70efb1cd 100644
--- a/release.json
+++ b/release.json
@@ -1,47 +1,47 @@
{
"base_branch": "main",
- "current_milestone": "7.58.0",
+ "current_milestone": "7.59.0",
"last_stable": {
"6": "6.53.0",
- "7": "7.56.2"
+ "7": "7.57.0"
},
"nightly": {
"INTEGRATIONS_CORE_VERSION": "master",
- "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5",
+ "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb",
"OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14",
"JMXFETCH_VERSION": "0.49.3",
"JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c",
"MACOS_BUILD_VERSION": "master",
"WINDOWS_DDNPM_DRIVER": "release-signed",
- "WINDOWS_DDNPM_VERSION": "2.7.0",
- "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8",
+ "WINDOWS_DDNPM_VERSION": "2.7.1",
+ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2",
"SECURITY_AGENT_POLICIES_VERSION": "master",
"WINDOWS_DDPROCMON_DRIVER": "release-signed",
- "WINDOWS_DDPROCMON_VERSION": "1.0.2",
- "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab",
+ "WINDOWS_DDPROCMON_VERSION": "1.0.4",
+ "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490",
"WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets",
"WINDOWS_APMINJECT_MODULE": "release-signed",
- "WINDOWS_APMINJECT_VERSION": "1.1.2",
- "WINDOWS_APMINJECT_SHASUM": "27d85ab3a26c123b2655a838b0bec099268de2f2b86d2b8a74232e65f4f8f05f"
+ "WINDOWS_APMINJECT_VERSION": "1.1.3",
+ "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd"
},
"nightly-a7": {
"INTEGRATIONS_CORE_VERSION": "master",
- "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5",
+ "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb",
"OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14",
"JMXFETCH_VERSION": "0.49.3",
"JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c",
"MACOS_BUILD_VERSION": "master",
"WINDOWS_DDNPM_DRIVER": "release-signed",
- "WINDOWS_DDNPM_VERSION": "2.7.0",
- "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8",
+ "WINDOWS_DDNPM_VERSION": "2.7.1",
+ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2",
"SECURITY_AGENT_POLICIES_VERSION": "master",
"WINDOWS_DDPROCMON_DRIVER": "release-signed",
- "WINDOWS_DDPROCMON_VERSION": "1.0.2",
- "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab",
+ "WINDOWS_DDPROCMON_VERSION": "1.0.4",
+ "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490",
"WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets",
"WINDOWS_APMINJECT_MODULE": "release-signed",
- "WINDOWS_APMINJECT_VERSION": "1.1.2",
- "WINDOWS_APMINJECT_SHASUM": "27d85ab3a26c123b2655a838b0bec099268de2f2b86d2b8a74232e65f4f8f05f"
+ "WINDOWS_APMINJECT_VERSION": "1.1.3",
+ "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd"
},
"release-a6": {
"INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2",
@@ -49,14 +49,14 @@
"OMNIBUS_RUBY_VERSION": "7.56.0-rc.1",
"JMXFETCH_VERSION": "0.49.3",
"JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c",
- "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0",
+ "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0",
"MACOS_BUILD_VERSION": "6.56.0-rc.3",
"WINDOWS_DDNPM_DRIVER": "release-signed",
- "WINDOWS_DDNPM_VERSION": "2.7.0",
- "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8",
+ "WINDOWS_DDNPM_VERSION": "2.7.1",
+ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2",
"WINDOWS_DDPROCMON_DRIVER": "release-signed",
- "WINDOWS_DDPROCMON_VERSION": "1.0.2",
- "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab"
+ "WINDOWS_DDPROCMON_VERSION": "1.0.4",
+ "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490"
},
"release-a7": {
"INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2",
@@ -64,14 +64,14 @@
"OMNIBUS_RUBY_VERSION": "7.56.0-rc.1",
"JMXFETCH_VERSION": "0.49.3",
"JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c",
- "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0",
+ "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0",
"MACOS_BUILD_VERSION": "7.56.0-rc.3",
"WINDOWS_DDNPM_DRIVER": "release-signed",
- "WINDOWS_DDNPM_VERSION": "2.7.0",
- "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8",
+ "WINDOWS_DDNPM_VERSION": "2.7.1",
+ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2",
"WINDOWS_DDPROCMON_DRIVER": "release-signed",
- "WINDOWS_DDPROCMON_VERSION": "1.0.2",
- "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab"
+ "WINDOWS_DDPROCMON_VERSION": "1.0.4",
+ "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490"
},
"dca-1.17.0": {
"SECURITY_AGENT_POLICIES_VERSION": "v0.18.6"
diff --git a/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml
new file mode 100644
index 0000000000000..807f941327e69
--- /dev/null
+++ b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml
@@ -0,0 +1,17 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+enhancements:
+ - |
+ Added a new option for the Cluster Agent
+ ("admission_controller.inject_config.type_socket_volumes") to specify that
+ injected volumes should be of type "Socket". This option is disabled by
+ default. When set to true, injected pods will not start until the Agent
+    creates the DogStatsD and trace-agent sockets. This ensures no traces or
+    DogStatsD metrics are lost, but it can cause the pod to wait if the Agent
+ has issues creating the sockets.
diff --git a/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml
new file mode 100644
index 0000000000000..1866763d95841
--- /dev/null
+++ b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml
@@ -0,0 +1,12 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+fixes:
+ - |
+    Adds missing support for the logs config key in AD annotations V2.
+
diff --git a/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml b/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml
new file mode 100644
index 0000000000000..75e66514f275a
--- /dev/null
+++ b/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+enhancements:
+ - |
+ [DBM] Bump go-sqllexer to 0.0.14 to skip collecting CTE tables as SQL metadata.
diff --git a/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml b/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml
new file mode 100644
index 0000000000000..cd824cd795d5d
--- /dev/null
+++ b/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+fixes:
+ - |
+ Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled.
diff --git a/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml
new file mode 100644
index 0000000000000..79170d87fd8cb
--- /dev/null
+++ b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml
@@ -0,0 +1,14 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+upgrade:
+ - |
+    Changes the behavior of the timeout for Network Path. Previously, the timeout
+ signified the total time to wait for a full traceroute to complete. Now,
+ the timeout signifies the time to wait for each hop in the traceroute.
+ Additionally, the default timeout has been changed to 1000ms.
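Because the timeout now applies per hop rather than to the whole traceroute, the worst-case wall time scales with the number of hops probed. A quick back-of-the-envelope check, with an illustrative hop ceiling (the real bound depends on the path and configuration):

```python
per_hop_timeout_ms = 1000  # new default, applied to each hop
max_hops = 30              # illustrative traceroute TTL ceiling

# Worst case: every hop times out, so the total wait is the per-hop
# budget multiplied by the number of hops probed.
print(per_hop_timeout_ms * max_hops)  # 30000 ms
```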
diff --git a/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml
new file mode 100644
index 0000000000000..fc143b588792e
--- /dev/null
+++ b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+enhancements:
+ - |
+ The default UDP port for traceroute (port 33434) is now used for Network Traffic based paths, instead of the port detected by NPM.
diff --git a/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml
new file mode 100644
index 0000000000000..19c9c532dfc58
--- /dev/null
+++ b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+features:
+ - |
+    NPM - adds UDP "Packets Sent" and "Packets Received" to the network telemetry on Linux.
diff --git a/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml
new file mode 100644
index 0000000000000..e5bf2a5ef0573
--- /dev/null
+++ b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+fixes:
+ - |
+ OOM Kill Check now reports the cgroup name of the victim process rather than the triggering process.
diff --git a/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml
new file mode 100644
index 0000000000000..194a7a58b5d88
--- /dev/null
+++ b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+other:
+ - |
+    Adds Agent telemetry for the Oracle collector.
diff --git a/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml
new file mode 100644
index 0000000000000..ff993fc5f45d1
--- /dev/null
+++ b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml
@@ -0,0 +1,16 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+upgrade:
+ - |
+ Removed the deprecated config option ``otlp_config.debug.loglevel`` in favor of ``otlp_config.debug.verbosity``:
+ * ``loglevel: debug`` maps to ``verbosity: detailed``
+ * ``loglevel: info`` maps to ``verbosity: normal``
+ * ``loglevel: warn/error`` maps to ``verbosity: basic``
+ * ``loglevel: disabled`` maps to ``verbosity: none``
+
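For scripted config migrations, the mapping in the note above is small enough to express directly; this helper is hypothetical, not part of the Agent, and just restates the table:

```python
# Mirrors the loglevel -> verbosity mapping from the release note above.
LOGLEVEL_TO_VERBOSITY = {
    "debug": "detailed",
    "info": "normal",
    "warn": "basic",
    "error": "basic",
    "disabled": "none",
}


def migrate_loglevel(loglevel: str) -> str:
    return LOGLEVEL_TO_VERBOSITY[loglevel]
```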
diff --git a/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml
new file mode 100644
index 0000000000000..39e25e452387c
--- /dev/null
+++ b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note is combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+fixes:
+ - |
+    Fixes a bug preventing SNMPv3 reconnection.
diff --git a/tasks/dogstatsd.py b/tasks/dogstatsd.py
index 874eb4974341d..6373af271bf0b 100644
--- a/tasks/dogstatsd.py
+++ b/tasks/dogstatsd.py
@@ -18,7 +18,7 @@
# constants
DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd")
STATIC_BIN_PATH = os.path.join(".", "bin", "static")
-MAX_BINARY_SIZE = 42 * 1024
+MAX_BINARY_SIZE = 44 * 1024
DOGSTATSD_TAG = "datadog/dogstatsd:master"
diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py
index c16a5c42e0cbe..c3c6781a98bb8 100644
--- a/tasks/libs/common/git.py
+++ b/tasks/libs/common/git.py
@@ -167,7 +167,12 @@ def get_last_commit(ctx, repo, branch):
)
-def get_last_tag(ctx, repo, pattern):
+def get_last_release_tag(ctx, repo, pattern):
+ import re
+ from functools import cmp_to_key
+
+ import semver
+
tags = ctx.run(
rf'git ls-remote -t https://github.com/DataDog/{repo} "{pattern}"',
hide=True,
@@ -180,9 +185,24 @@ def get_last_tag(ctx, repo, pattern):
),
code=1,
)
- last_tag = tags.splitlines()[-1]
+
+ release_pattern = re.compile(r'.*7\.[0-9]+\.[0-9]+(-rc.*|-devel.*)?$')
+ tags_without_suffix = [
+ line for line in tags.splitlines() if not line.endswith("^{}") and release_pattern.match(line)
+ ]
+ last_tag = max(tags_without_suffix, key=lambda x: cmp_to_key(semver.compare)(x.split('/')[-1]))
last_tag_commit, last_tag_name = last_tag.split()
- if last_tag_name.endswith("^{}"):
- last_tag_name = last_tag_name.removesuffix("^{}")
+ tags_with_suffix = [line for line in tags.splitlines() if line.endswith("^{}") and release_pattern.match(line)]
+ if tags_with_suffix:
+ last_tag_with_suffix = max(
+ tags_with_suffix, key=lambda x: cmp_to_key(semver.compare)(x.split('/')[-1].removesuffix("^{}"))
+ )
+ last_tag_commit_with_suffix, last_tag_name_with_suffix = last_tag_with_suffix.split()
+ if (
+ semver.compare(last_tag_name_with_suffix.split('/')[-1].removesuffix("^{}"), last_tag_name.split("/")[-1])
+ >= 0
+ ):
+ last_tag_commit = last_tag_commit_with_suffix
+ last_tag_name = last_tag_name_with_suffix.removesuffix("^{}")
last_tag_name = last_tag_name.removeprefix("refs/tags/")
return last_tag_commit, last_tag_name
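Some context on the ^{} handling in get_last_release_tag: `git ls-remote -t` lists an annotated tag twice, once as the tag object and once "peeled" to the commit it points at, marked with a ^{} suffix. The new code picks the highest release tag from each group and prefers the peeled entry when it is at least as new, since the peeled hash points at the commit rather than the tag object. A sketch of that selection with invented hashes and tags (semver is the PyPI package the task already imports):

```python
from functools import cmp_to_key

import semver

# Invented `git ls-remote -t` output for illustration.
lines = [
    "aaa111\trefs/tags/7.56.2",
    "bbb222\trefs/tags/7.57.0",
    "ccc333\trefs/tags/7.57.0^{}",
    "ddd444\trefs/tags/7.57.0-rc.1",
]


def version_of(line: str) -> str:
    return line.split("/")[-1].removesuffix("^{}")


def key(line: str):
    return cmp_to_key(semver.compare)(version_of(line))


plain = max((l for l in lines if not l.endswith("^{}")), key=key)
peeled = max((l for l in lines if l.endswith("^{}")), key=key, default=None)

# Prefer the peeled ref when it is at least as new: its hash is the commit.
best = peeled if peeled and semver.compare(version_of(peeled), version_of(plain)) >= 0 else plain
print(best.split())  # ['ccc333', 'refs/tags/7.57.0^{}']
```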
diff --git a/tasks/libs/common/omnibus.py b/tasks/libs/common/omnibus.py
index c8c59478738e1..d91b2649d27ab 100644
--- a/tasks/libs/common/omnibus.py
+++ b/tasks/libs/common/omnibus.py
@@ -95,7 +95,7 @@ def env_filter(item):
"BUILD_HOOK",
"BUNDLE_MIRROR__RUBYGEMS__ORG",
"BUCKET_BRANCH",
- "CHANGELOG_COMMIT_SHA_SSM_NAME",
+ "CHANGELOG_COMMIT_SHA",
"CLANG_LLVM_VER",
"CHANNEL",
"CHART",
@@ -121,7 +121,7 @@ def env_filter(item):
"HOSTNAME",
"HOST_IP",
"INFOPATH",
- "INSTALL_SCRIPT_API_KEY_SSM_NAME",
+ "INSTALL_SCRIPT_API_KEY",
"INTEGRATION_WHEELS_CACHE_BUCKET",
"IRBRC",
"KITCHEN_INFRASTRUCTURE_FLAKES_RETRY",
@@ -159,11 +159,11 @@ def env_filter(item):
"USERDOMAIN",
"USERNAME",
"USERPROFILE",
- "VCPKG_BLOB_SAS_URL_SSM_NAME",
+ "VCPKG_BLOB_SAS_URL",
"VERSION",
"VM_ASSETS",
"WIN_S3_BUCKET",
- "WINGET_PAT_SSM_NAME",
+ "WINGET_PAT",
"WORKFLOW",
"_",
"build_before",
@@ -318,7 +318,7 @@ def send_build_metrics(ctx, overall_duration):
}
)
dd_api_key = ctx.run(
- f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text',
+ f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text',
hide=True,
).stdout.strip()
headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key}
@@ -336,7 +336,7 @@ def send_cache_miss_event(ctx, pipeline_id, job_name, job_id):
else:
aws_cmd = "aws"
dd_api_key = ctx.run(
- f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text',
+ f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text',
hide=True,
).stdout.strip()
headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key}
diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py
index f2de7730ed88d..8b2e04244bb52 100644
--- a/tasks/libs/common/utils.py
+++ b/tasks/libs/common/utils.py
@@ -8,6 +8,7 @@
import platform
import re
import sys
+import tempfile
import time
import traceback
from collections import Counter
@@ -18,15 +19,12 @@
from subprocess import CalledProcessError, check_output
from types import SimpleNamespace
+import requests
from invoke.context import Context
from invoke.exceptions import Exit
from tasks.libs.common.color import Color, color_message
-from tasks.libs.common.constants import (
- ALLOWED_REPO_ALL_BRANCHES,
- DEFAULT_BRANCH,
- REPO_PATH,
-)
+from tasks.libs.common.constants import ALLOWED_REPO_ALL_BRANCHES, DEFAULT_BRANCH, REPO_PATH
from tasks.libs.common.git import get_commit_sha
from tasks.libs.owners.parsing import search_owners
from tasks.libs.releasing.version import get_version
@@ -699,3 +697,31 @@ def team_to_label(team):
'asm-go': "agent-security",
}
return dico.get(team, team)
+
+
+@contextmanager
+def download_to_tempfile(url, checksum=None):
+ """
+    Download a file from @url to a temporary file and yield the path.
+
+ The temporary file is removed when the context manager exits.
+
+    If @checksum is provided, it is updated with each chunk of the file.
+ """
+ fd, tmp_path = tempfile.mkstemp()
+ try:
+ with requests.get(url, stream=True) as r:
+ r.raise_for_status()
+ with os.fdopen(fd, "wb") as f:
+                # fd will be closed by the context manager, so we no longer need it
+ fd = None
+ for chunk in r.iter_content(chunk_size=8192):
+ if checksum:
+ checksum.update(chunk)
+ f.write(chunk)
+ yield tmp_path
+ finally:
+ if fd is not None:
+ os.close(fd)
+ if os.path.exists(tmp_path):
+ os.remove(tmp_path)
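
A short usage sketch for the new helper, pairing it with hashlib the way the MSI task further down does; the URL and expected digest are placeholders:

import hashlib

from tasks.libs.common.utils import download_to_tempfile

checksum = hashlib.sha256()
expected = "0123abcd..."  # placeholder for the published sha256 digest
with download_to_tempfile("https://example.com/driver.msm", checksum) as tmp_path:
    # checksum was fed every chunk during the download
    if checksum.hexdigest().lower() != expected:
        raise RuntimeError("checksum mismatch")
    # tmp_path is deleted when the block exits, so move or copy it here
    # if the file needs to outlive the context manager
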
diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml
index 42f19b76c9bb4..5e8ce75acd72d 100644
--- a/tasks/libs/pipeline/github_slack_map.yaml
+++ b/tasks/libs/pipeline/github_slack_map.yaml
@@ -41,3 +41,4 @@
'@datadog/agent-devx-infra': '#agent-devx-ops'
'@datadog/agent-devx-loops': '#agent-devx-ops'
'@datadog/apm-onboarding': '#apm-onboarding'
+'@datadog/apm-reliability-and-performance': '#apm-ecosystems-reliability-and-performance'
diff --git a/tasks/libs/types/copyright.py b/tasks/libs/types/copyright.py
index a19773744f811..301db649f32b0 100755
--- a/tasks/libs/types/copyright.py
+++ b/tasks/libs/types/copyright.py
@@ -66,6 +66,7 @@
'^// This file is licensed under the MIT License.',
'^// Copyright \\(C\\) 2017 ScyllaDB',
'^// Copyright \\(c\\) Tailscale Inc & AUTHORS',
+ '^// Code generated by github.com/tinylib/msgp DO NOT EDIT.',
]
@@ -144,7 +145,7 @@ def _is_excluded_header(header, exclude=None):
exclude = []
for matcher in exclude:
- if re.search(matcher, header[0]):
+ if re.search(matcher, header[0]) or re.search(matcher, header[2]):
return True
return False
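
A plausible reading of the header[2] addition: generated-code markers such as the msgp one can sit on the third line of a file's header rather than the first, so matching only header[0] would miss them. A minimal sketch of the check under that assumption (the header lines are illustrative):

import re

exclude = [r'^// Code generated by github.com/tinylib/msgp DO NOT EDIT.']
# Hypothetical first three header lines of an msgp-generated file.
header = [
    "// Copyright 2016-present Datadog, Inc.",
    "//",
    "// Code generated by github.com/tinylib/msgp DO NOT EDIT.",
]

def is_excluded(header, exclude):
    # Mirrors the change above: a match on line 1 or line 3 excludes the file.
    return any(re.search(m, header[0]) or re.search(m, header[2]) for m in exclude)

print(is_excluded(header, exclude))  # True
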
diff --git a/tasks/linter.py b/tasks/linter.py
index 2136ae60f25d0..1d32a7e38ea43 100644
--- a/tasks/linter.py
+++ b/tasks/linter.py
@@ -349,11 +349,11 @@ def ssm_parameters(ctx, mode="all", folders=None):
for filename in error_files:
print(f" - {filename}")
raise Exit(code=1)
- print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for aws ssm parameters.")
+ print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for secret parameters.")
class SSMParameterCall:
- def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standard=True):
+ def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False):
"""
Initialize an SSMParameterCall instance.
@@ -362,18 +362,16 @@ def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standa
line_nb (int): The line number in the file where the SSM parameter call is located.
with_wrapper (bool, optional): If the call is using the wrapper. Defaults to False.
with_env_var (bool, optional): If the call is using an environment variable defined in .gitlab-ci.yml. Defaults to False.
- not_standard (bool, optional): If the call is standard (matching either "aws ssm get-parameter --name" or "aws_ssm_get_wrapper"). Defaults to True.
"""
self.file = file
self.line_nb = line_nb
self.with_wrapper = with_wrapper
self.with_env_var = with_env_var
- self.standard = standard
def __str__(self):
message = ""
- if not self.with_wrapper or not self.standard:
- message += "Please use the dedicated `aws_ssm_get_wrapper.(sh|ps1)`."
+ if not self.with_wrapper:
+ message += "Please use the dedicated `fetch_secret.(sh|ps1)`."
if not self.with_env_var:
message += " Save your parameter name as environment variable in .gitlab-ci.yml file."
return f"{self.file}:{self.line_nb + 1}. {message}"
@@ -383,29 +381,24 @@ def __repr__(self):
def list_get_parameter_calls(file):
- ssm_get = re.compile(r"^.+ssm.get.+$")
     aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P<param>[^ ]+).*$")
- # remove the 'a' of 'aws' because '\a' is badly interpreted for windows paths
-    ssm_wrapper_call = re.compile(r"^.+ws_ssm_get_wrapper.(sh|ps1)[\"]? +(?P<param>[^ )]+).*$")
+    # remove the first letter of the script name: '\f' in a Windows path would be parsed as a form-feed escape
+    wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? +(?P<param>[^ )]+).*$")
calls = []
with open(file) as f:
try:
for nb, line in enumerate(f):
- is_ssm_get = ssm_get.match(line.strip())
- if is_ssm_get:
- m = aws_ssm_call.match(line.strip())
- if m:
- # Remove possible quotes
- param = m["param"].replace('"', '').replace("'", "")
- calls.append(
- SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param))
- )
- m = ssm_wrapper_call.match(line.strip())
- param = m["param"].replace('"', '').replace("'", "") if m else None
- if m and not (param.startswith("$") or "os.environ" in param):
- calls.append(SSMParameterCall(file, nb, with_wrapper=True))
- if not m:
- calls.append(SSMParameterCall(file, nb, standard=False))
+ m = aws_ssm_call.match(line.strip())
+ if m:
+ # Remove possible quotes
+ param = m["param"].replace('"', '').replace("'", "")
+ calls.append(
+ SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param))
+ )
+ m = wrapper_call.match(line.strip())
+ param = m["param"].replace('"', '').replace("'", "") if m else None
+ if m and not (param.startswith("$") or "os.environ" in param):
+ calls.append(SSMParameterCall(file, nb, with_wrapper=True))
except UnicodeDecodeError:
pass
return calls
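
To make the rewritten matching concrete, here is a small sketch running both regexes over illustrative CI lines (the parameter names are made up). A call is compliant only when it goes through the wrapper and takes its parameter from an environment variable:

import re

aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P<param>[^ ]+).*$")
wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? +(?P<param>[^ )]+).*$")

samples = [
    # raw ssm call with an env var: flagged, but with_env_var=True
    "export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2)",
    # wrapper with a hardcoded parameter: flagged, with_wrapper=True
    "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog_api_key)",
    # wrapper with an env var: compliant, no SSMParameterCall is recorded
    "export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2)",
]
for line in samples:
    m = aws_ssm_call.match(line) or wrapper_call.match(line)
    param = m["param"].replace('"', '').replace("'", "") if m else None
    from_env = param is not None and (param.startswith("$") or "os.environ" in param)
    print(f"{param!r:30} from_env={from_env}")
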
diff --git a/tasks/modules.py b/tasks/modules.py
index 39cbb81e14242..ebed764f57eaf 100644
--- a/tasks/modules.py
+++ b/tasks/modules.py
@@ -147,6 +147,7 @@ def dependency_path(self, agent_version):
"comp/core/secrets": GoModule("comp/core/secrets", independent=True, used_by_otel=True),
"comp/core/status": GoModule("comp/core/status", independent=True, used_by_otel=True),
"comp/core/status/statusimpl": GoModule("comp/core/status/statusimpl", independent=True),
+ "comp/core/tagger/types": GoModule("comp/core/tagger/types", independent=True, used_by_otel=True),
"comp/core/tagger/utils": GoModule("comp/core/tagger/utils", independent=True, used_by_otel=True),
"comp/core/telemetry": GoModule("comp/core/telemetry", independent=True, used_by_otel=True),
"comp/def": GoModule("comp/def", independent=True, used_by_otel=True),
@@ -275,6 +276,7 @@ def dependency_path(self, agent_version):
"pkg/util/statstracker": GoModule("pkg/util/statstracker", independent=True, used_by_otel=True),
"pkg/util/system": GoModule("pkg/util/system", independent=True, used_by_otel=True),
"pkg/util/system/socket": GoModule("pkg/util/system/socket", independent=True, used_by_otel=True),
+ "pkg/util/tagger": GoModule("pkg/util/tagger", independent=True, used_by_otel=True),
"pkg/util/testutil": GoModule("pkg/util/testutil", independent=True, used_by_otel=True),
"pkg/util/uuid": GoModule("pkg/util/uuid", independent=True),
"pkg/util/winutil": GoModule("pkg/util/winutil", independent=True, used_by_otel=True),
diff --git a/tasks/msi.py b/tasks/msi.py
index 5574f67debb98..ea838f27817c5 100644
--- a/tasks/msi.py
+++ b/tasks/msi.py
@@ -2,6 +2,7 @@
msi namespaced tasks
"""
+import hashlib
import mmap
import os
import shutil
@@ -11,7 +12,7 @@
from invoke import task
from invoke.exceptions import Exit, UnexpectedExit
-from tasks.libs.common.utils import timed
+from tasks.libs.common.utils import download_to_tempfile, timed
from tasks.libs.releasing.version import get_version, load_release_versions
# Windows only import
@@ -29,6 +30,8 @@
BUILD_ROOT_DIR = os.path.join('C:\\', "dev", "msi", "DatadogAgentInstaller")
BUILD_SOURCE_DIR = os.path.join(BUILD_ROOT_DIR, "src")
BUILD_OUTPUT_DIR = os.path.join(BUILD_ROOT_DIR, "output")
+# Must match BinSource in AgentInstaller.cs
+AGENT_BIN_SOURCE_DIR = os.path.join('C:\\', 'opt', 'datadog-agent', 'bin', 'agent')
NUGET_PACKAGES_DIR = os.path.join(BUILD_ROOT_DIR, 'packages')
NUGET_CONFIG_FILE = os.path.join(BUILD_ROOT_DIR, 'NuGet.config')
@@ -433,3 +436,85 @@ def MsiClosing(obj):
yield obj
finally:
obj.Close()
+
+
+def get_msm_info(ctx, release_version):
+ """
+ Get the merge module info from the release.json for the given release_version
+ """
+ env = load_release_versions(ctx, release_version)
+ base_url = "https://s3.amazonaws.com/dd-windowsfilter/builds"
+ msm_info = {}
+ if 'WINDOWS_DDNPM_VERSION' in env:
+ info = {
+ 'filename': 'DDNPM.msm',
+ 'build': env['WINDOWS_DDNPM_DRIVER'],
+ 'version': env['WINDOWS_DDNPM_VERSION'],
+ 'shasum': env['WINDOWS_DDNPM_SHASUM'],
+ }
+ info['url'] = f"{base_url}/{info['build']}/ddnpminstall-{info['version']}.msm"
+ msm_info['DDNPM'] = info
+ if 'WINDOWS_DDPROCMON_VERSION' in env:
+ info = {
+ 'filename': 'DDPROCMON.msm',
+ 'build': env['WINDOWS_DDPROCMON_DRIVER'],
+ 'version': env['WINDOWS_DDPROCMON_VERSION'],
+ 'shasum': env['WINDOWS_DDPROCMON_SHASUM'],
+ }
+ info['url'] = f"{base_url}/{info['build']}/ddprocmoninstall-{info['version']}.msm"
+ msm_info['DDPROCMON'] = info
+ if 'WINDOWS_APMINJECT_VERSION' in env:
+ info = {
+ 'filename': 'ddapminstall.msm',
+ 'build': env['WINDOWS_APMINJECT_MODULE'],
+ 'version': env['WINDOWS_APMINJECT_VERSION'],
+ 'shasum': env['WINDOWS_APMINJECT_SHASUM'],
+ }
+ info['url'] = f"{base_url}/{info['build']}/ddapminstall-{info['version']}.msm"
+ msm_info['APMINJECT'] = info
+ return msm_info
+
+
+@task(
+ iterable=['drivers'],
+ help={
+ 'drivers': 'List of drivers to fetch (default: DDNPM, DDPROCMON, APMINJECT)',
+ 'release_version': 'Release version to fetch drivers from (default: nightly-a7)',
+ },
+)
+def fetch_driver_msm(ctx, drivers=None, release_version=None):
+ """
+ Fetch the driver merge modules (.msm) that are consumed by the Agent MSI.
+
+    Defaults to the versions provided in the @release_version section of release.json.
+ """
+ ALLOWED_DRIVERS = ['DDNPM', 'DDPROCMON', 'APMINJECT']
+ if not release_version:
+ release_version = 'nightly-a7'
+
+ msm_info = get_msm_info(ctx, release_version)
+ if not drivers:
+        # if the user did not specify drivers, use the ones from release.json
+ drivers = msm_info.keys()
+
+ for driver in drivers:
+ driver = driver.upper()
+ if driver not in ALLOWED_DRIVERS:
+ raise Exit(f"Invalid driver: {driver}, choose from {ALLOWED_DRIVERS}")
+
+ info = msm_info[driver]
+ url = info['url']
+ shasum = info['shasum']
+ path = os.path.join(AGENT_BIN_SOURCE_DIR, info['filename'])
+
+ # download from url with requests package
+ checksum = hashlib.sha256()
+ with download_to_tempfile(url, checksum) as tmp_path:
+ # check sha256
+ if checksum.hexdigest().lower() != shasum.lower():
+ raise Exit(f"Checksum mismatch for {url}")
+ # move to final path
+ shutil.move(tmp_path, path)
+
+ print(f"Updated {driver}")
+ print(f"\t-> Downloaded {url} to {path}")
diff --git a/tasks/pipeline.py b/tasks/pipeline.py
index 8072ba025d4c5..1d300a7f38ee2 100644
--- a/tasks/pipeline.py
+++ b/tasks/pipeline.py
@@ -529,7 +529,7 @@ def changelog(ctx, new_commit_sha):
else:
parent_dir = os.getcwd()
old_commit_sha = ctx.run(
- f"{parent_dir}/tools/ci/aws_ssm_get_wrapper.sh {os.environ['CHANGELOG_COMMIT_SHA_SSM_NAME']}",
+ f"{parent_dir}/tools/ci/fetch_secret.sh {os.environ['CHANGELOG_COMMIT_SHA']}",
hide=True,
).stdout.strip()
if not new_commit_sha:
diff --git a/tasks/release.py b/tasks/release.py
index ac168432829c9..8668e9f3a8b50 100644
--- a/tasks/release.py
+++ b/tasks/release.py
@@ -29,7 +29,7 @@
clone,
get_current_branch,
get_last_commit,
- get_last_tag,
+ get_last_release_tag,
try_git_command,
)
from tasks.libs.common.user_interactions import yes_no_question
@@ -598,6 +598,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_versions="6,7", up
current = current_version(ctx, max(list_major_versions))
next = current.next_version(bump_minor=True)
current.rc = False
+ current.devel = False
next.devel = False
# Strings with proper branch/tag names
@@ -1008,7 +1009,7 @@ def check_for_changes(ctx, release_branch, warning_mode=False):
changes = 'false'
for repo_name, repo in repo_data.items():
head_commit = get_last_commit(ctx, repo_name, repo['branch'])
- last_tag_commit, last_tag_name = get_last_tag(ctx, repo_name, next_version.tag_pattern())
+ last_tag_commit, last_tag_name = get_last_release_tag(ctx, repo_name, next_version.tag_pattern())
if last_tag_commit != "" and last_tag_commit != head_commit:
changes = 'true'
print(f"{repo_name} has new commits since {last_tag_name}", file=sys.stderr)
diff --git a/tasks/security_agent.py b/tasks/security_agent.py
index d2d97597b4cb9..9b38f03ad6f07 100644
--- a/tasks/security_agent.py
+++ b/tasks/security_agent.py
@@ -239,7 +239,7 @@ def build_go_syscall_tester(ctx, build_dir):
return syscall_tester_exe_file
-def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=None, static=True):
+def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=None, static=True, compiler='clang'):
if flags is None:
flags = []
if libs is None:
@@ -256,11 +256,11 @@ def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=Non
nw.build(
inputs=[syscall_tester_c_file],
outputs=[syscall_tester_exe_file],
- rule="execlang",
+ rule="exe" + compiler,
variables={
"exeflags": flags,
"exelibs": libs,
- "flags": [f"-D__{uname_m}__", f"-isystem/usr/include/{uname_m}-linux-gnu"],
+ "flags": [f"-isystem/usr/include/{uname_m}-linux-gnu"],
},
)
return syscall_tester_exe_file
@@ -307,12 +307,16 @@ def build_embed_latency_tools(ctx, static=True):
ctx.run(f"ninja -f {nf_path}")
-def ninja_syscall_x86_tester(ctx, build_dir, static=True):
- return ninja_c_syscall_tester_common(ctx, "syscall_x86_tester", build_dir, flags=["-m32"], static=static)
+def ninja_syscall_x86_tester(ctx, build_dir, static=True, compiler='clang'):
+ return ninja_c_syscall_tester_common(
+ ctx, "syscall_x86_tester", build_dir, flags=["-m32"], static=static, compiler=compiler
+ )
-def ninja_syscall_tester(ctx, build_dir, static=True):
- return ninja_c_syscall_tester_common(ctx, "syscall_tester", build_dir, libs=["-lpthread"], static=static)
+def ninja_syscall_tester(ctx, build_dir, static=True, compiler='clang'):
+ return ninja_c_syscall_tester_common(
+ ctx, "syscall_tester", build_dir, libs=["-lpthread"], static=static, compiler=compiler
+ )
def create_dir_if_needed(dir):
@@ -324,7 +328,7 @@ def create_dir_if_needed(dir):
@task
-def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True):
+def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True, compiler="clang"):
arch = Arch.from_str(arch)
check_for_ninja(ctx)
build_dir = os.path.join("pkg", "security", "tests", "syscall_tester", "bin")
@@ -335,11 +339,11 @@ def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True
with open(nf_path, 'w') as ninja_file:
nw = NinjaWriter(ninja_file, width=120)
ninja_define_ebpf_compiler(nw, arch=arch)
- ninja_define_exe_compiler(nw)
+ ninja_define_exe_compiler(nw, compiler=compiler)
- ninja_syscall_tester(nw, build_dir, static=static)
+ ninja_syscall_tester(nw, build_dir, static=static, compiler=compiler)
if arch == ARCH_AMD64:
- ninja_syscall_x86_tester(nw, build_dir, static=static)
+ ninja_syscall_x86_tester(nw, build_dir, static=static, compiler=compiler)
ninja_ebpf_probe_syscall_tester(nw, go_dir)
ctx.run(f"ninja -f {nf_path}")
@@ -362,6 +366,7 @@ def build_functional_tests(
kernel_release=None,
debug=False,
skip_object_files=False,
+ syscall_tester_compiler='clang',
):
if not is_windows:
if not skip_object_files:
@@ -373,7 +378,7 @@ def build_functional_tests(
debug=debug,
bundle_ebpf=bundle_ebpf,
)
- build_embed_syscall_tester(ctx)
+ build_embed_syscall_tester(ctx, compiler=syscall_tester_compiler)
arch = Arch.from_str(arch)
ldflags, gcflags, env = get_build_flags(ctx, major_version=major_version, static=static, arch=arch)
diff --git a/tasks/system_probe.py b/tasks/system_probe.py
index fa89df422bafb..a0e218e222fbd 100644
--- a/tasks/system_probe.py
+++ b/tasks/system_probe.py
@@ -54,6 +54,7 @@
"./pkg/collector/corechecks/ebpf/...",
"./pkg/collector/corechecks/servicediscovery/module/...",
"./pkg/process/monitor/...",
+ "./pkg/dynamicinstrumentation/...",
]
TEST_PACKAGES = " ".join(TEST_PACKAGES_LIST)
# change `timeouts` in `test/new-e2e/system-probe/test-runner/main.go` if you change them here
@@ -141,10 +142,10 @@ def ninja_define_co_re_compiler(nw: NinjaWriter, arch: Arch | None = None):
)
-def ninja_define_exe_compiler(nw: NinjaWriter):
+def ninja_define_exe_compiler(nw: NinjaWriter, compiler='clang'):
nw.rule(
- name="execlang",
- command="clang -MD -MF $out.d $exeflags $flags $in -o $out $exelibs",
+ name="exe" + compiler,
+ command=f"{compiler} -MD -MF $out.d $exeflags $flags $in -o $out $exelibs",
depfile="$out.d",
)
@@ -386,6 +387,7 @@ def ninja_runtime_compilation_files(nw: NinjaWriter, gobin):
"pkg/network/tracer/connection/kprobe/compile.go": "tracer",
"pkg/network/tracer/offsetguess_test.go": "offsetguess-test",
"pkg/security/ebpf/compile.go": "runtime-security",
+ "pkg/dynamicinstrumentation/codegen/compile.go": "dynamicinstrumentation",
}
nw.rule(
@@ -494,6 +496,7 @@ def ninja_cgo_type_files(nw: NinjaWriter):
"pkg/ebpf/types.go": [
"pkg/ebpf/c/lock_contention.h",
],
+ "pkg/dynamicinstrumentation/ditypes/ebpf.go": ["pkg/dynamicinstrumentation/codegen/c/types.h"],
}
nw.rule(
name="godefs",
@@ -513,7 +516,7 @@ def ninja_cgo_type_files(nw: NinjaWriter):
inputs=[f],
outputs=[os.path.join(in_dir, out_file)],
rule="godefs",
- implicit=headers,
+ implicit=headers + [script_path],
variables={
"in_dir": in_dir,
"in_file": in_file,
diff --git a/tasks/unit_tests/libs/common/git_tests.py b/tasks/unit_tests/libs/common/git_tests.py
index dcfd884f76115..4a803a58b2758 100644
--- a/tasks/unit_tests/libs/common/git_tests.py
+++ b/tasks/unit_tests/libs/common/git_tests.py
@@ -1,11 +1,14 @@
import unittest
from unittest.mock import MagicMock
+from invoke import MockContext, Result
+
from tasks.libs.common.git import (
check_local_branch,
check_uncommitted_changes,
get_commit_sha,
get_current_branch,
+ get_last_release_tag,
get_staged_files,
)
@@ -107,3 +110,71 @@ def test_get_commit_sha(self):
f"git rev-parse {'--short ' if test['short'] else ''}HEAD", hide=True
)
self.ctx_mock.run.reset_mock()
+
+
+class TestGetLastTag(unittest.TestCase):
+ def test_ordered(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result(
+ "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3"
+ )
+ }
+ )
+ _, name = get_last_release_tag(c, "woof", "7.56.*")
+ self.assertEqual(name, "7.56.0-rc.3")
+
+ def test_non_ordered(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result(
+ "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.11\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3"
+ )
+ }
+ )
+ _, name = get_last_release_tag(c, "woof", "7.56.*")
+ self.assertEqual(name, "7.56.0-rc.11")
+
+ def test_suffix_lower(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result(
+ "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3"
+ )
+ }
+ )
+ _, name = get_last_release_tag(c, "woof", "7.56.*")
+ self.assertEqual(name, "7.56.0-rc.3")
+
+ def test_suffix_equal(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result(
+ "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.3^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3"
+ )
+ }
+ )
+ commit, _ = get_last_release_tag(c, "woof", "7.56.*")
+ self.assertEqual(commit, "7c6777bb7add533a789c69293b59e3261711d330")
+
+ def test_suffix_greater(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result(
+ "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.4^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3"
+ )
+ }
+ )
+ _, name = get_last_release_tag(c, "woof", "7.56.*")
+ self.assertEqual(name, "7.56.0-rc.4")
+
+ def test_only_release_tags(self):
+ c = MockContext(
+ run={
+ 'git ls-remote -t https://github.com/DataDog/woof "7.57.*"': Result(
+ "43638bd55a74fd6ec51264cc7b3b1003d0b1c7ac\trefs/tags/7.57.0-dbm-mongo-1.5\ne01bcf3d12e6d6742b1fa8296882938c6dba9922\trefs/tags/7.57.0-devel\n6a5ad7fda590c7b8ba7036bca70dc8a0872e7afe\trefs/tags/7.57.0-devel^{}\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.1\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.2\n6a91fcca0ade9f77f08cd98d923a8d9ec18d7e8f\trefs/tags/7.57.0-installer-0.5.0-rc.3\n7e8ffc3de15f0486e6cb2184fa59f02da6ecfab9\trefs/tags/7.57.0-rc.1\nfa72fd12e3483a2d5957ea71fe01a8b1af376424\trefs/tags/7.57.0-rc.1^{}\n22587b746d6a0876cb7477b9b335e8573bdc3ac5\trefs/tags/7.57.0-rc.2\nd6c151a36487c3b54145ae9bf200f6c356bb9348\trefs/tags/7.57.0-rc.2^{}\n948ed4dd8c8cdf0aae467997086bb2229d4f1916\trefs/tags/7.57.0-rc.3\n259ed086a45960006e110622332cc8a39f9c6bb9\trefs/tags/7.57.0-rc.3^{}\na249f4607e5da894715a3e011dba8046b46678ed\trefs/tags/7.57.0-rc.4\n51a3b405a244348aec711d38e5810a6d88075b77\trefs/tags/7.57.0-rc.4^{}\n06519be707d6f24fb8265cde5a50cf0a66d5cb02\trefs/tags/7.57.0-rc.5\n7f43a5180446290f498742e68d8b28a75da04188\trefs/tags/7.57.0-rc.5^{}\n6bb640559e7626131290c63dab3959ba806c9886\trefs/tags/7.57.0-rc.6\nc5ed1f8b4734d31e94c2a83f307dbcb2b5a1faac\trefs/tags/7.57.0-rc.6^{}\n260697e624bb1d92ad306fdc301aab9b2975a627\trefs/tags/7.57.0-rc.7\n48617a0f56747e33b75d3dcf570bc2237726dc0e\trefs/tags/7.57.0-rc.7^{}\n5e11e104ff99b40b01ff2cfa702c0e4a465f98de\trefs/tags/7.57.1-beta-ndm-rdns-enrichment\n91c7c85d7c8fbb94421a90b273aea75630617eef\trefs/tags/7.57.1-beta-ndm-rdns-enrichment^{}\n3ad359da2894fa3de6e265c56dea8fabdb128454\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2\n86683ad80578912014cc947dcf247ba020532403\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2^{}"
+ )
+ }
+ )
+ _, name = get_last_release_tag(c, "woof", "7.57.*")
+ self.assertEqual(name, "7.57.0-rc.7")
diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py
index 78ac5a323ee1d..b6a2cd3a5f283 100644
--- a/tasks/unit_tests/linter_tests.py
+++ b/tasks/unit_tests/linter_tests.py
@@ -32,7 +32,7 @@ def test_without_wrapper_no_env(self):
def test_without_wrapper_with_env(self):
with open(self.test_file, "w") as f:
f.write(
- " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2_SSM_NAME --with-decryption --query Parameter.Value --out text"
+ " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2 --with-decryption --query Parameter.Value --out text"
)
matched = linter.list_get_parameter_calls(self.test_file)[0]
self.assertFalse(matched.with_wrapper)
@@ -41,7 +41,7 @@ def test_without_wrapper_with_env(self):
def test_with_wrapper_no_env(self):
with open(self.test_file, "w") as f:
f.write(
- "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh test.datadog-agent.datadog_api_key_org2)"
+ "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog-agent.datadog_api_key_org2)"
)
matched = linter.list_get_parameter_calls(self.test_file)[0]
self.assertTrue(matched.with_wrapper)
@@ -49,25 +49,24 @@ def test_with_wrapper_no_env(self):
def test_with_wrapper_with_env(self):
with open(self.test_file, "w") as f:
- f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $APP_KEY_ORG2_SSM_NAME)")
+ f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2)")
matched = linter.list_get_parameter_calls(self.test_file)
self.assertListEqual([], matched)
def test_multi_match_windows(self):
with open(self.test_file, "w") as f:
f.write(
- 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get_wrapper.ps1" "test.datadog-agent.datadog_api_key_org2)\n'
- 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get wrapper.ps1" "$Env:MISSING_UNDERSCORE)\n'
- '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "bad.name")\n'
- 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "$Env:TEST")\n'
+ 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch_secret.ps1" test.datadog-agent.datadog_api_key_org2 $tmpfile)\n'
+ 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch secret.ps1" "$Env:MISSING_UNDERSCORE" $tmpfile)\n'
+ '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "bad.name" "$tmpfile")\n'
+ 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "$Env:TEST" $tmpfile)\n'
)
matched = linter.list_get_parameter_calls(self.test_file)
- self.assertEqual(3, len(matched))
+ self.assertEqual(2, len(matched))
self.assertTrue(matched[0].with_wrapper)
self.assertFalse(matched[0].with_env_var)
- self.assertFalse(matched[1].standard)
- self.assertTrue(matched[2].with_wrapper)
- self.assertFalse(matched[2].with_env_var)
+ self.assertTrue(matched[1].with_wrapper)
+ self.assertFalse(matched[1].with_env_var)
class TestGitlabChangePaths(unittest.TestCase):
diff --git a/tasks/unit_tests/omnibus_tests.py b/tasks/unit_tests/omnibus_tests.py
index 1d7438303eefd..1aeaf35e411f7 100644
--- a/tasks/unit_tests/omnibus_tests.py
+++ b/tasks/unit_tests/omnibus_tests.py
@@ -41,7 +41,7 @@ def _run_calls_to_string(mock_calls):
'CI_PIPELINE_ID': '',
'RELEASE_VERSION_7': 'nightly',
'S3_OMNIBUS_CACHE_BUCKET': 'omnibus-cache',
- 'API_KEY_ORG2_SSM_NAME': 'api-key',
+ 'API_KEY_ORG2': 'api-key',
},
clear=True,
)
diff --git a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
index 3a5f0ec2d1bc7..74d4ee191925a 100644
--- a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
@@ -19,7 +19,6 @@ extensions:
exporters:
- gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0
- - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
- gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0
diff --git a/tasks/unit_tests/testdata/fake_gitlab-ci.yml b/tasks/unit_tests/testdata/fake_gitlab-ci.yml
index a07aa2828d10c..06478a78e1dcb 100644
--- a/tasks/unit_tests/testdata/fake_gitlab-ci.yml
+++ b/tasks/unit_tests/testdata/fake_gitlab-ci.yml
@@ -174,15 +174,15 @@ variables:
DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded
DEB_GPG_KEY_ID: ad9589b7
DEB_GPG_KEY_NAME: "Datadog, Inc. Master key"
- DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID}
- DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID}
+ DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID}
+ DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID}
RPM_GPG_KEY_ID: fd4bf915
RPM_GPG_KEY_NAME: "Datadog, Inc. RPM key"
- RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID}
- RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID}
+ RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID}
+ RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID}
# docker.io authentication
- DOCKER_REGISTRY_LOGIN_SSM_KEY: docker_hub_login
- DOCKER_REGISTRY_PWD_SSM_KEY: docker_hub_pwd
+ DOCKER_REGISTRY_LOGIN: docker_hub_login
+ DOCKER_REGISTRY_PWD: docker_hub_pwd
DOCKER_REGISTRY_URL: docker.io
KITCHEN_INFRASTRUCTURE_FLAKES_RETRY: 2
CLANG_LLVM_VER: 12.0.1
diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1
index 8323cdd0afe75..ad28ec0540ad6 100644
--- a/tasks/winbuildscripts/unittests.ps1
+++ b/tasks/winbuildscripts/unittests.ps1
@@ -60,9 +60,14 @@ $err = $LASTEXITCODE
# Ignore upload failures
$ErrorActionPreference = "Continue"
+$tmpfile = [System.IO.Path]::GetTempFileName()
# 1. Upload coverage reports to Codecov
-$Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CODECOV_TOKEN_SSM_NAME)
+& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:CODECOV_TOKEN" "$tmpfile"
+If ($LASTEXITCODE -ne "0") {
+ exit $LASTEXITCODE
+}
+$Env:CODECOV_TOKEN=$(cat "$tmpfile")
& inv -e coverage.upload-to-codecov $Env:COVERAGE_CACHE_FLAG
# 2. Upload junit files
@@ -70,10 +75,19 @@ $Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CO
Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object {
Copy-Item -Path $_.FullName -Destination C:\mnt
}
-$Env:DATADOG_API_KEY=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:API_KEY_ORG2_SSM_NAME)
-$Env:GITLAB_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:GITLAB_TOKEN_SSM_NAME)
-& inv -e junit-upload --tgz-path $Env:JUNIT_TAR
+& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:API_KEY_ORG2" "$tmpfile"
+If ($LASTEXITCODE -ne "0") {
+ exit $LASTEXITCODE
+}
+$Env:DATADOG_API_KEY=$(cat "$tmpfile")
+& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:GITLAB_TOKEN" "$tmpfile"
+If ($LASTEXITCODE -ne "0") {
+ exit $LASTEXITCODE
+}
+$Env:GITLAB_TOKEN=$(cat "$tmpfile")
+Remove-Item "$tmpfile"
+& inv -e junit-upload --tgz-path $Env:JUNIT_TAR
if($err -ne 0){
Write-Host -ForegroundColor Red "test failed $err"
[Environment]::Exit($err)
diff --git a/test/e2e/argo-workflows/otlp-workflow.yaml b/test/e2e/argo-workflows/otlp-workflow.yaml
deleted file mode 100644
index 9320d2ae9ad7c..0000000000000
--- a/test/e2e/argo-workflows/otlp-workflow.yaml
+++ /dev/null
@@ -1,156 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: argo-datadog-agent-
-spec:
- entrypoint: main
- onExit: exit-handler
- arguments:
- parameters:
- - name: datadog-agent-image-repository
- - name: datadog-agent-image-tag
- - name: datadog-cluster-agent-image-repository
- - name: datadog-cluster-agent-image-tag
- - name: ci_commit_short_sha
- - name: ci_pipeline_id
- - name: ci_job_id
- volumes:
- - name: datadog-agent-volume
- hostPath:
- path: /host/datadog-agent
- templates:
- - name: main
- inputs:
- parameters:
- - name: datadog-agent-image-repository
- - name: datadog-agent-image-tag
- - name: datadog-cluster-agent-image-repository
- - name: datadog-cluster-agent-image-tag
- - name: ci_commit_short_sha
- - name: ci_pipeline_id
- - name: ci_job_id
- steps:
- - - name: start-fake-datadog
- templateRef:
- name: fake-datadog
- template: create
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - name: start-otlp-test
- templateRef:
- name: otlp-test
- template: create
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - - name: fake-dd-reset
- templateRef:
- name: fake-datadog
- template: reset
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - - name: start-datadog-agent
- templateRef:
- name: datadog-agent
- template: create
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
- - name: agent-image-repository
- value: "{{inputs.parameters.datadog-agent-image-repository}}"
- - name: agent-image-tag
- value: "{{inputs.parameters.datadog-agent-image-tag}}"
- - name: dd-url
- value: "http://fake-datadog.{{workflow.namespace}}.svc.cluster.local"
- - name: site
- value: ""
- - name: cluster-agent-image-repository
- value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}"
- - name: cluster-agent-image-tag
- value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}"
- - name: ci_commit_short_sha
- value: "{{inputs.parameters.ci_commit_short_sha}}"
- - name: ci_pipeline_id
- value: "{{inputs.parameters.ci_pipeline_id}}"
- - name: ci_job_id
- value: "{{inputs.parameters.ci_job_id}}"
- - name: remote_configuration_enabled
- value: "false"
- - name: networkmonitoring_enabled
- value: "false"
-
- - - name: wait-datadog-agent
- templateRef:
- name: datadog-agent
- template: wait
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - - name: test-otlp
- templateRef:
- name: otlp-test
- template: test
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - name: exit-handler
- steps:
- - - name: delete
- template: delete
- when: "{{workflow.status}} == Succeeded"
-
- - name: diagnose
- template: diagnose
- when: "{{workflow.status}} != Succeeded"
-
- - name: delete
- steps:
- - - name: stop-datadog-agent
- templateRef:
- name: datadog-agent
- template: delete
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - name: stop-otlp-test
- templateRef:
- name: otlp-test
- template: delete
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
-
- - name: diagnose
- steps:
- - - name: diagnose-datadog-agent
- templateRef:
- name: datadog-agent
- template: diagnose
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
- - name: diagnose-otlp-test
- templateRef:
- name: otlp-test
- template: diagnose
- arguments:
- parameters:
- - name: namespace
- value: "{{workflow.namespace}}"
diff --git a/test/e2e/argo-workflows/templates/otlp-test.yaml b/test/e2e/argo-workflows/templates/otlp-test.yaml
deleted file mode 100644
index 9f9716ffd9fd2..0000000000000
--- a/test/e2e/argo-workflows/templates/otlp-test.yaml
+++ /dev/null
@@ -1,229 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: WorkflowTemplate
-metadata:
- name: otlp-test
-spec:
- templates:
- - name: create-sender-config
- inputs:
- parameters:
- - name: namespace
- resource:
- action: apply
- manifest: |
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: sender-config
- namespace: {{inputs.parameters.namespace}}
- data:
- sender-config: |+
- receivers:
- file:
- path: /etc/data/metrics.data
- loop:
- enabled: true
- period: 10s
- exporters:
- otlp:
- endpoint: ${DD_AGENT_OTLP_ENDPOINT}
- tls:
- insecure: true
- service:
- pipelines:
- metrics:
- receivers: [file]
- exporters: [otlp]
- - name: create-metrics-data
- inputs:
- parameters:
- - name: namespace
- resource:
- action: apply
- manifest: |
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: metrics-data
- namespace: {{inputs.parameters.namespace}}
- data:
- metrics-data: |+
- {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","asDouble":14}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","count":"42","sum":1541400,"bucketCounts":["14","0","14","0","0","14","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]}
- {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","asDouble":27}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","count":"81","sum":2972700,"bucketCounts":["27","0","27","0","0","27","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]}
- - name: create-deployment
- inputs:
- parameters:
- - name: namespace
- resource:
- action: apply
- manifest: |
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: otlp-sender
- namespace: {{inputs.parameters.namespace}}
- spec:
- replicas: 1
- selector:
- matchLabels:
- app: otlp-sender
- template:
- metadata:
- labels:
- app: otlp-sender
- spec:
- containers:
- - name: sender
- image: datadog/docker-library:e2e-otlp-sender_latest
- resources:
- requests:
- memory: "32Mi"
- cpu: "100m"
- limits:
- memory: "32Mi"
- cpu: "100m"
- env:
- - name: DD_AGENT_HOST
- valueFrom:
- fieldRef:
- fieldPath: status.hostIP
- - name: DD_AGENT_OTLP_ENDPOINT
- value: http://$(DD_AGENT_HOST):4317
- volumeMounts:
- - name: "sender-config"
- mountPath: "/etc/otel"
- - name: "metrics-data"
- mountPath: "/etc/data"
- volumes:
- - name: "sender-config"
- configMap:
- name: "sender-config"
- items:
- - key: sender-config
- path: config.yaml
- - name: "metrics-data"
- configMap:
- name: "metrics-data"
- items:
- - key: metrics-data
- path: metrics.data
- - name: create
- inputs:
- parameters:
- - name: namespace
- steps:
- - - name: sender-config
- template: create-sender-config
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
- - name: metrics-data
- template: create-metrics-data
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
- - name: deployment
- template: create-deployment
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
-
- - name: delete-deployment
- inputs:
- parameters:
- - name: namespace
- resource:
- action: delete
- manifest: |
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: otlp-sender
- namespace: {{inputs.parameters.namespace}}
- - name: delete-sender-config
- inputs:
- parameters:
- - name: namespace
- resource:
- action: delete
- manifest: |
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: sender-config
- namespace: {{inputs.parameters.namespace}}
- - name: delete-metrics-data
- inputs:
- parameters:
- - name: namespace
- resource:
- action: delete
- manifest: |
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: metrics-data
- namespace: {{inputs.parameters.namespace}}
- - name: delete
- inputs:
- parameters:
- - name: namespace
- steps:
- - - name: deployment
- template: delete-deployment
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
- - name: sender-config
- template: delete-sender-config
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
- - name: metrics-data
- template: delete-metrics-data
- arguments:
- parameters:
- - name: namespace
- value: "{{inputs.parameters.namespace}}"
-
- - name: test
- inputs:
- parameters:
- - name: namespace
- activeDeadlineSeconds: 300
- script:
- image: mongo:4.4.1
- command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"]
- source: |
- while (1) {
- sleep(2000);
-
- // Gauges
- var nb = db.series.find({metric: "an_important_metric"}).count();
- if (nb == 0) {
- print("no 'an_important_metric' metric found");
- continue;
- }
-
- print("All good");
- break;
- }
- - name: diagnose
- inputs:
- parameters:
- - name: namespace
- activeDeadlineSeconds: 300
- script:
- image: alpine/k8s:1.27.1
- command: [sh]
- source: |
- set -euo pipefail
-
- kubectl --namespace {{inputs.parameters.namespace}} get pods -l app=otlp-sender -o custom-columns=name:metadata.name --no-headers | while read -r po; do
- kubectl --namespace {{inputs.parameters.namespace}} logs $po -c sender || true
- done
diff --git a/test/e2e/containers/otlp_sender/cmd/sender/main.go b/test/e2e/containers/otlp_sender/cmd/sender/main.go
index 242b6ef83f01d..b30813cc7f789 100644
--- a/test/e2e/containers/otlp_sender/cmd/sender/main.go
+++ b/test/e2e/containers/otlp_sender/cmd/sender/main.go
@@ -11,7 +11,7 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/exporter"
- "go.opentelemetry.io/collector/exporter/loggingexporter"
+ "go.opentelemetry.io/collector/exporter/debugexporter"
"go.opentelemetry.io/collector/exporter/otlpexporter"
"go.opentelemetry.io/collector/exporter/otlphttpexporter"
"go.opentelemetry.io/collector/extension"
@@ -40,7 +40,7 @@ func components() (
exporters, err := exporter.MakeFactoryMap(
otlpexporter.NewFactory(),
otlphttpexporter.NewFactory(),
- loggingexporter.NewFactory(),
+ debugexporter.NewFactory(),
)
errs = multierr.Append(errs, err)
diff --git a/test/e2e/containers/otlp_sender/go.mod b/test/e2e/containers/otlp_sender/go.mod
index 13a7945ebe571..3ae057624be2d 100644
--- a/test/e2e/containers/otlp_sender/go.mod
+++ b/test/e2e/containers/otlp_sender/go.mod
@@ -6,7 +6,7 @@ require (
go.opentelemetry.io/collector/component v0.104.0
go.opentelemetry.io/collector/consumer v0.104.0
go.opentelemetry.io/collector/exporter v0.104.0
- go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0
+ go.opentelemetry.io/collector/exporter/debugexporter v0.104.0
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0
go.opentelemetry.io/collector/extension v0.104.0
diff --git a/test/e2e/containers/otlp_sender/go.sum b/test/e2e/containers/otlp_sender/go.sum
index 7ab785e653595..5f88f0d5eafab 100644
--- a/test/e2e/containers/otlp_sender/go.sum
+++ b/test/e2e/containers/otlp_sender/go.sum
@@ -175,8 +175,8 @@ go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhR
go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo=
go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg=
go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8=
+go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw=
+go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo=
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ=
go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg=
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c=
diff --git a/test/fakeintake/aggregator/servicediscoveryAggregator.go b/test/fakeintake/aggregator/servicediscoveryAggregator.go
index 3ee0a522e01ec..0582205b34b41 100644
--- a/test/fakeintake/aggregator/servicediscoveryAggregator.go
+++ b/test/fakeintake/aggregator/servicediscoveryAggregator.go
@@ -20,17 +20,20 @@ type ServiceDiscoveryPayload struct {
RequestType string `json:"request_type"`
APIVersion string `json:"api_version"`
Payload struct {
- NamingSchemaVersion string `json:"naming_schema_version"`
- ServiceName string `json:"service_name"`
- HostName string `json:"host_name"`
- Env string `json:"env"`
- ServiceLanguage string `json:"service_language"`
- ServiceType string `json:"service_type"`
- StartTime int64 `json:"start_time"`
- LastSeen int64 `json:"last_seen"`
- APMInstrumentation string `json:"apm_instrumentation"`
- ServiceNameSource string `json:"service_name_source"`
- RSSMemory uint64 `json:"rss_memory"`
+ NamingSchemaVersion string `json:"naming_schema_version"`
+ ServiceName string `json:"service_name"`
+ GeneratedServiceName string `json:"generated_service_name"`
+ DDService string `json:"dd_service,omitempty"`
+ HostName string `json:"host_name"`
+ Env string `json:"env"`
+ ServiceLanguage string `json:"service_language"`
+ ServiceType string `json:"service_type"`
+ StartTime int64 `json:"start_time"`
+ LastSeen int64 `json:"last_seen"`
+ APMInstrumentation string `json:"apm_instrumentation"`
+ ServiceNameSource string `json:"service_name_source,omitempty"`
+ RSSMemory uint64 `json:"rss_memory"`
+ CPUCores float64 `json:"cpu_cores"`
} `json:"payload"`
}
diff --git a/test/integration/serverless/snapshots/error-csharp b/test/integration/serverless/snapshots/error-csharp
index c030934e86cae..b96bb73eee4ce 100644
--- a/test/integration/serverless/snapshots/error-csharp
+++ b/test/integration/serverless/snapshots/error-csharp
@@ -533,6 +533,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp",
+ "functionname:integration-tests-extension-XXXXXX-error-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp",
+ "functionname:integration-tests-extension-XXXXXX-error-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp",
+ "functionname:integration-tests-extension-XXXXXX-error-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp",
+ "functionname:integration-tests-extension-XXXXXX-error-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/error-java b/test/integration/serverless/snapshots/error-java
index 9aaf96d543760..754e7f0733ffd 100644
--- a/test/integration/serverless/snapshots/error-java
+++ b/test/integration/serverless/snapshots/error-java
@@ -533,6 +533,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java",
+ "functionname:integration-tests-extension-XXXXXX-error-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java",
+ "functionname:integration-tests-extension-XXXXXX-error-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java",
+ "functionname:integration-tests-extension-XXXXXX-error-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java",
+ "functionname:integration-tests-extension-XXXXXX-error-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/error-node b/test/integration/serverless/snapshots/error-node
index 58d2a83a13bd6..97b02cd4fd0dd 100644
--- a/test/integration/serverless/snapshots/error-node
+++ b/test/integration/serverless/snapshots/error-node
@@ -537,6 +537,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node",
+ "functionname:integration-tests-extension-XXXXXX-error-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node",
+ "functionname:integration-tests-extension-XXXXXX-error-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node",
+ "functionname:integration-tests-extension-XXXXXX-error-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node",
+ "functionname:integration-tests-extension-XXXXXX-error-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/error-proxy b/test/integration/serverless/snapshots/error-proxy
index caf6b1aeedfb3..58e810c64e07a 100644
--- a/test/integration/serverless/snapshots/error-proxy
+++ b/test/integration/serverless/snapshots/error-proxy
@@ -533,6 +533,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy",
+ "functionname:integration-tests-extension-XXXXXX-error-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy",
+ "functionname:integration-tests-extension-XXXXXX-error-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy",
+ "functionname:integration-tests-extension-XXXXXX-error-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy",
+ "functionname:integration-tests-extension-XXXXXX-error-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/error-python b/test/integration/serverless/snapshots/error-python
index e2c1e888a085c..e7bd220d86bf5 100644
--- a/test/integration/serverless/snapshots/error-python
+++ b/test/integration/serverless/snapshots/error-python
@@ -539,6 +539,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python",
+ "functionname:integration-tests-extension-XXXXXX-error-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python",
+ "functionname:integration-tests-extension-XXXXXX-error-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python",
+ "functionname:integration-tests-extension-XXXXXX-error-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python",
+ "functionname:integration-tests-extension-XXXXXX-error-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-error-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-csharp b/test/integration/serverless/snapshots/metric-csharp
index e8212c0f4a253..87d214181a82a 100644
--- a/test/integration/serverless/snapshots/metric-csharp
+++ b/test/integration/serverless/snapshots/metric-csharp
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp",
+ "functionname:integration-tests-extension-XXXXXX-metric-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp",
+ "functionname:integration-tests-extension-XXXXXX-metric-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp",
+ "functionname:integration-tests-extension-XXXXXX-metric-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp",
+ "functionname:integration-tests-extension-XXXXXX-metric-csharp",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-csharp",
+ "runtime:dotnet6",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-go b/test/integration/serverless/snapshots/metric-go
index 24046cf6d6953..950cacf5bd7b6 100644
--- a/test/integration/serverless/snapshots/metric-go
+++ b/test/integration/serverless/snapshots/metric-go
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go",
+ "functionname:integration-tests-extension-XXXXXX-metric-go",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-go",
+ "runtime:provided.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go",
+ "functionname:integration-tests-extension-XXXXXX-metric-go",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-go",
+ "runtime:provided.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go",
+ "functionname:integration-tests-extension-XXXXXX-metric-go",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-go",
+ "runtime:provided.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go",
+ "functionname:integration-tests-extension-XXXXXX-metric-go",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-go",
+ "runtime:provided.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-java b/test/integration/serverless/snapshots/metric-java
index bbb3debe64b28..168e525ffc408 100644
--- a/test/integration/serverless/snapshots/metric-java
+++ b/test/integration/serverless/snapshots/metric-java
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java",
+ "functionname:integration-tests-extension-XXXXXX-metric-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java",
+ "functionname:integration-tests-extension-XXXXXX-metric-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java",
+ "functionname:integration-tests-extension-XXXXXX-metric-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java",
+ "functionname:integration-tests-extension-XXXXXX-metric-java",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-java",
+ "runtime:java8.al2",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-node b/test/integration/serverless/snapshots/metric-node
index 8bc4b04fa27e1..c74d07d228aea 100644
--- a/test/integration/serverless/snapshots/metric-node
+++ b/test/integration/serverless/snapshots/metric-node
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node",
+ "functionname:integration-tests-extension-XXXXXX-metric-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node",
+ "functionname:integration-tests-extension-XXXXXX-metric-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node",
+ "functionname:integration-tests-extension-XXXXXX-metric-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node",
+ "functionname:integration-tests-extension-XXXXXX-metric-node",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-node",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-proxy b/test/integration/serverless/snapshots/metric-proxy
index 260310cc549e9..9156964619d76 100644
--- a/test/integration/serverless/snapshots/metric-proxy
+++ b/test/integration/serverless/snapshots/metric-proxy
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy",
+ "functionname:integration-tests-extension-XXXXXX-metric-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy",
+ "functionname:integration-tests-extension-XXXXXX-metric-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy",
+ "functionname:integration-tests-extension-XXXXXX-metric-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy",
+ "functionname:integration-tests-extension-XXXXXX-metric-proxy",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-proxy",
+ "runtime:nodejs18.x",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless/snapshots/metric-python b/test/integration/serverless/snapshots/metric-python
index f2c2492f9ae59..65d8ec77aa3f6 100644
--- a/test/integration/serverless/snapshots/metric-python
+++ b/test/integration/serverless/snapshots/metric-python
@@ -485,6 +485,102 @@
"version:integration-tests-version"
]
},
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python",
+ "functionname:integration-tests-extension-XXXXXX-metric-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_max",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python",
+ "functionname:integration-tests-extension-XXXXXX-metric-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python",
+ "functionname:integration-tests-extension-XXXXXX-metric-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
+ {
+ "distributions": null,
+ "dogsketches": [],
+ "metric": "aws.lambda.enhanced.fd_use",
+ "tags": [
+ "account_id:############",
+ "architecture:XXX",
+ "aws_account:############",
+ "dd_extension_version:123",
+ "env:integration-tests-env",
+ "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python",
+ "functionname:integration-tests-extension-XXXXXX-metric-python",
+ "memorysize:1024",
+ "region:eu-west-1",
+ "resource:integration-tests-extension-XXXXXX-metric-python",
+ "runtime:python3.8",
+ "service:integration-tests-service",
+ "taga:valuea",
+ "tagb:valueb",
+ "tagc:valuec",
+ "tagd:valued",
+ "version:integration-tests-version"
+ ]
+ },
{
"distributions": null,
"dogsketches": [],
diff --git a/test/integration/serverless_perf/write_message.sh b/test/integration/serverless_perf/write_message.sh
new file mode 100755
index 0000000000000..557426e9ac7c3
--- /dev/null
+++ b/test/integration/serverless_perf/write_message.sh
@@ -0,0 +1,46 @@
+#!/bin/bash -e
+
+filename="${HOME}/comment.md"
+echo "filename=$filename" >> "$GITHUB_OUTPUT"
+
+if [ "${VAR_COLD_START}" != 0 ]; then
+ echo -n ":warning::rotating_light: Warning, " > "$filename"
+else
+ echo -n ":inbox_tray: :loudspeaker: Info, " > "$filename"
+fi
+
+cat >> "$filename" << EOL
+this pull request increases the binary size of the serverless extension by ${VAR_DIFF} bytes. Each MB of binary size increase means about 10ms of additional cold start time, so this pull request would increase cold start time by ${VAR_COLD_START}ms.
+
+
+Debug info
+
+If you have questions, we are happy to help. Come visit us in the [#serverless](https://dd.slack.com/archives/CBWDFKWV8) Slack channel and provide a link to this comment.
+
+EOL
+
+if [ -n "$VAR_DEPS" ]; then
+ cat >> "$filename" << EOL
+
+These dependencies were added to the serverless extension by this pull request:
+
+\`\`\`
+${VAR_DEPS}
+\`\`\`
+
+View dependency graphs for each added dependency in the [artifacts section](https://github.com/DataDog/datadog-agent/actions/runs/${VAR_RUN_ID}#artifacts) of the GitHub Action.
+
+EOL
+fi
+
+cat >> "$filename" << EOL
+
+We suggest you consider adding the \`!serverless\` build tag to remove any new dependencies not needed in the serverless extension.
+
+
+
+EOL
+
+echo "Will post comment with message:"
+echo
+cat "$filename"
diff --git a/test/kitchen/tasks/clean.sh b/test/kitchen/tasks/clean.sh
index 3aa774275039f..c351060cc08ca 100755
--- a/test/kitchen/tasks/clean.sh
+++ b/test/kitchen/tasks/clean.sh
@@ -8,19 +8,19 @@ set -euo pipefail
# These should not be printed out
if [ -z ${AZURE_CLIENT_ID+x} ]; then
- AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME)
+ AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID)
export AZURE_CLIENT_ID
fi
if [ -z ${AZURE_CLIENT_SECRET+x} ]; then
- AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME)
+ AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET)
export AZURE_CLIENT_SECRET
fi
if [ -z ${AZURE_TENANT_ID+x} ]; then
- AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME)
+ AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID)
export AZURE_TENANT_ID
fi
if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then
- AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME)
+ AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID)
export AZURE_SUBSCRIPTION_ID
fi
if [ -z ${DD_PIPELINE_ID+x} ]; then
diff --git a/test/kitchen/tasks/run-test-kitchen.sh b/test/kitchen/tasks/run-test-kitchen.sh
index a88290161363e..6e51d4013da71 100755
--- a/test/kitchen/tasks/run-test-kitchen.sh
+++ b/test/kitchen/tasks/run-test-kitchen.sh
@@ -54,25 +54,25 @@ if [ "$KITCHEN_PROVIDER" == "azure" ]; then
# These should not be printed out
set +x
if [ -z ${AZURE_CLIENT_ID+x} ]; then
- AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME)
+ AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID)
# make sure whitespace is removed
AZURE_CLIENT_ID="$(echo -e "${AZURE_CLIENT_ID}" | tr -d '[:space:]')"
export AZURE_CLIENT_ID
fi
if [ -z ${AZURE_CLIENT_SECRET+x} ]; then
- AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME)
+ AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET)
# make sure whitespace is removed
AZURE_CLIENT_SECRET="$(echo -e "${AZURE_CLIENT_SECRET}" | tr -d '[:space:]')"
export AZURE_CLIENT_SECRET
fi
if [ -z ${AZURE_TENANT_ID+x} ]; then
- AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME)
+ AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID)
# make sure whitespace is removed
AZURE_TENANT_ID="$(echo -e "${AZURE_TENANT_ID}" | tr -d '[:space:]')"
export AZURE_TENANT_ID
fi
if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then
- AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME)
+ AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID)
# make sure whitespace is removed
AZURE_SUBSCRIPTION_ID="$(echo -e "${AZURE_SUBSCRIPTION_ID}" | tr -d '[:space:]')"
export AZURE_SUBSCRIPTION_ID
@@ -101,7 +101,7 @@ elif [ "$KITCHEN_PROVIDER" == "ec2" ]; then
export KITCHEN_EC2_SSH_KEY_ID="datadog-agent-kitchen"
export KITCHEN_EC2_SSH_KEY_PATH="$(pwd)/aws-ssh-key"
touch $KITCHEN_EC2_SSH_KEY_PATH && chmod 600 $KITCHEN_EC2_SSH_KEY_PATH
- $PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_EC2_SSH_KEY_SSM_NAME > $KITCHEN_EC2_SSH_KEY_PATH
+ $PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_EC2_SSH_KEY > $KITCHEN_EC2_SSH_KEY_PATH
fi
fi
diff --git a/test/kitchen/tasks/show-strays.sh b/test/kitchen/tasks/show-strays.sh
index ab12b9a5edd5f..996c95f6ac04b 100755
--- a/test/kitchen/tasks/show-strays.sh
+++ b/test/kitchen/tasks/show-strays.sh
@@ -10,19 +10,19 @@ set -euo pipefail
# These should not be printed out
set +x
if [ -z ${AZURE_CLIENT_ID+x} ]; then
- AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME)
+ AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID)
export AZURE_CLIENT_ID
fi
if [ -z ${AZURE_CLIENT_SECRET+x} ]; then
- AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME)
+ AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET)
export AZURE_CLIENT_SECRET
fi
if [ -z ${AZURE_TENANT_ID+x} ]; then
- AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME)
+ AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID)
export AZURE_TENANT_ID
fi
if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then
- AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME)
+ AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID)
export AZURE_SUBSCRIPTION_ID
fi
if [ -z ${DD_PIPELINE_ID+x} ]; then
diff --git a/test/new-e2e/examples/aks_test.go b/test/new-e2e/examples/aks_test.go
index 9b13ffb1fac02..4df8cfc7728c8 100644
--- a/test/new-e2e/examples/aks_test.go
+++ b/test/new-e2e/examples/aks_test.go
@@ -15,8 +15,6 @@ import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
-
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
azurekubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/azure/kubernetes"
@@ -27,7 +25,7 @@ type aksSuite struct {
}
func TestAKSSuite(t *testing.T) {
- e2e.Run(t, &aksSuite{}, e2e.WithProvisioner(azurekubernetes.AKSProvisioner(azurekubernetes.WithFakeIntakeOptions(fakeintake.WithDDDevForwarding()))))
+ e2e.Run(t, &aksSuite{}, e2e.WithProvisioner(azurekubernetes.AKSProvisioner()))
}
func (v *aksSuite) TestAKS() {
diff --git a/test/new-e2e/examples/gcp_vm_test.go b/test/new-e2e/examples/gcp_vm_test.go
new file mode 100644
index 0000000000000..1f897a87bbfe3
--- /dev/null
+++ b/test/new-e2e/examples/gcp_vm_test.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package examples
+
+import (
+ gcphost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/host/linux"
+ "testing"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+)
+
+type gcpVMSuite struct {
+ e2e.BaseSuite[environments.Host]
+}
+
+// TestGCPVMSuite runs tests for the VM interface to ensure its implementation is correct.
+func TestGCPVMSuite(t *testing.T) {
+ suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(gcphost.ProvisionerNoAgentNoFakeIntake())}
+ e2e.Run(t, &gcpVMSuite{}, suiteParams...)
+}
+
+func (v *gcpVMSuite) TestExecute() {
+ vm := v.Env().RemoteHost
+
+ out, err := vm.Execute("whoami")
+ v.Require().NoError(err)
+ v.Require().NotEmpty(out)
+}
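TestGCPVMSuite above provisions a bare VM. A suite that keeps the default Agent and FakeIntake would use the plain Provisioner from params.go, introduced later in this diff; a sketch, with the suite name and agent config value purely illustrative:

```go
package examples

import (
	"testing"

	"github.com/DataDog/test-infra-definitions/components/datadog/agentparams"

	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
	gcphost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/host/linux"
)

type gcpAgentSuite struct {
	e2e.BaseSuite[environments.Host]
}

func TestGCPAgentSuite(t *testing.T) {
	// Provisioner creates the VM, FakeIntake, and Agent by default;
	// WithAgentOptions only adjusts the Agent installation.
	e2e.Run(t, &gcpAgentSuite{}, e2e.WithProvisioner(gcphost.Provisioner(
		gcphost.WithAgentOptions(agentparams.WithAgentConfig("log_level: debug")),
	)))
}
```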
diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod
index 3eaa3216f47de..bbe8063d8489a 100644
--- a/test/new-e2e/go.mod
+++ b/test/new-e2e/go.mod
@@ -32,8 +32,8 @@ require (
// `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version
// Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB
// => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB
- github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23
- github.com/aws/aws-sdk-go-v2 v1.30.4
+ github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299
+ github.com/aws/aws-sdk-go-v2 v1.30.5
github.com/aws/aws-sdk-go-v2/config v1.27.19
github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2
github.com/aws/aws-sdk-go-v2/service/eks v1.44.1
@@ -50,9 +50,8 @@ require (
github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0
github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8
github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1
- github.com/pulumi/pulumi/sdk/v3 v3.129.0
+ github.com/pulumi/pulumi/sdk/v3 v3.131.0
github.com/samber/lo v1.47.0
- github.com/sethvargo/go-retry v0.2.4
github.com/stretchr/testify v1.9.0
github.com/xeipuuv/gojsonschema v1.2.0
golang.org/x/crypto v0.26.0
@@ -88,11 +87,11 @@ require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.19 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 // indirect
- github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14 // indirect
@@ -217,7 +216,6 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/texttheater/golang-levenshtein v1.0.1 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
- github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect
github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
@@ -266,9 +264,11 @@ require (
require github.com/hairyhenderson/go-codeowners v0.5.0
require (
- github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 // indirect
github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect
- github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0 // indirect
- github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 // indirect
- github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 // indirect
+ github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect
+ github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect
)
diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum
index 2927dfa3597e9..e31485a46f4bb 100644
--- a/test/new-e2e/go.sum
+++ b/test/new-e2e/go.sum
@@ -14,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmi
github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg=
-github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23 h1:43tVATnDYuG7xuAJAqvnre/3Hpbw+ZW6qoT3eTU1xbY=
-github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23/go.mod h1:i/i/Wn96wSbD3nKDr9pMe/R8+4Q1qps51tOg8+1q3NI=
+github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299 h1:lMzRshj0zEnNId74hiUsXSClnB0qKmQlC3VQ9kC6p+0=
+github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299/go.mod h1:orHExiPWWT9f68UJZ92oIVX1OcTNlKvtbX7b6HM9e0Q=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A=
@@ -45,8 +45,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8=
-github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
+github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
+github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.19 h1:+DBS8gJP6VsxYkZ6UEV0/VsRM2rYpbQCYsosW9RRmeQ=
@@ -55,18 +55,18 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.19 h1:R18G7nBBGLby51CFEqUBFF2IVl7
github.com/aws/aws-sdk-go-v2/credentials v1.17.19/go.mod h1:xr9kUMnaLTB866HItT6pg58JgiBP77fSQLBwIa//zk8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 h1:vVOuhRyslJ6T/HteG71ZWCTas1q2w6f0NKsNbkXHs/A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6/go.mod h1:jimWaqLiT0sJGLh51dKCLLtExRYPtMU7MpxuCgtbkxg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 h1:DXFWyt7ymx/l1ygdyTTS0X923e+Q2wXIxConJzrgwc0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12/go.mod h1:mVOr/LbvaNySK1/BTy4cBOCjhCNY2raWBwK4v+WR5J4=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 h1:Rts0EZgdi3tneJMXp+uKrZHbMxQIu0y5O/2MG6a2+hY=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2/go.mod h1:j0V2ahvdX3mGIyXQSe9vjdIQvSxz3uaMM0bR7Y+0WCE=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 h1:PxM8EHsv1sd9eWGamMQCvqBEjxytK5kAwjrxlfG3tac=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1/go.mod h1:kdk+WJbHcGVbIlRQfSrKyuKkbWDdD8I9NScyS5vZ8eQ=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 h1:nQAU2Yr+afkAvIV39mg7LrNYFNQP7ShwbmiJqx2fUKA=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4/go.mod h1:keOS9j4fv5ASh7dV29lIpGw2QgoJwGFAyMU0uPvfax4=
github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 h1:DSFxt4HBQjlgKNMyYdME9cbB11FFi7umpTGbqJaS9nw=
github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2/go.mod h1:er8WHbgZAl17Dmu41ifKmUrV7JPpiQnRc+XSrnu4qR8=
github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 h1:onUAzZXDsyXzyrmOGw/9p8Csl1NZkTDEs4URZ8covUY=
@@ -401,22 +401,26 @@ github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0 h1:DEbHd7krLB3p3Qr4PlAaEScA5mQR85jif
github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0/go.mod h1:gN/y6Gl/c6R2m1H0DlpyeyxpemtLJNhgHWcYz+vBPdo=
github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 h1:GknlrxIweg8X65VcxJaUVdZIHhclZjdzEWxsLGnMR2Y=
github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0/go.mod h1:mB6jxy6GjMd1dmTA129GkHH5pyryYG/W0J1X2XznxW4=
-github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 h1:0QxN2F/yiylylNjYMqqXc5RQoKan/Pq/x1v43QaxE/c=
-github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0/go.mod h1:pv7oEJtA6Tn8dnE8/xya/yCQd6GU0Br9c9nHRkW9LiQ=
+github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 h1:qCpKZQECnZWXVMWfuTk6nfPfQoP+7zXPS5bHdeIh5Mc=
+github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0/go.mod h1:ILyyA8nuYMWOcU7sRqRVmakNeY4hxog7K4nMCL+IOjE=
github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4=
github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws=
-github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0 h1:TZoU7xP9qI3M6eunVmyPtfqxgf+egUzc7GXVIeJdp5A=
-github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0/go.mod h1:J/ZpHh7KUmcMiki7fnrCYlA4YIdr2pG7yTWdrwlnedY=
-github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 h1:joRRPeYxXSaCGF7we0NNAMsOy7HJFd7O4cWAjmKveRI=
-github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0/go.mod h1:XFiuqPmtOASRrKU1q29sgzAuq9OcZ0bDzdBw9TSUyyo=
-github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0 h1:pPJMmpz7eIlnmqDPBJkJ1U27QuB/E7yfZgV6m8s1kmI=
-github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0/go.mod h1:M8QiZzL5yFrhRodptgpp8gZ9gLWEpqW7GkbWhMkvEzM=
+github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 h1:ijcCyi+SPlJn3aIEb4p23FTk6fxjPLtVMhfkRaKp85A=
+github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0/go.mod h1:yQXpYXNeGVBcygd5Be/fzf+1Jcg4kDLAMZY6UDtIZvQ=
+github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 h1:mqs2dlpcyYn2LsA20bC8xN30YaVs7x8M6tC7BtDiY64=
+github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0/go.mod h1:OTv2GUMWUktcvdjXFRaAdJDW1f/SuRSCKHdVCcQrN7U=
+github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 h1:Q+we+HFtnNGkeXIhdWIKJZWJRwaIBUuMcZKG70YIYyw=
+github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0/go.mod h1:guTN5l9syK6v4+94APSi9np3rj1JPrPUEOG+B0dDaZE=
github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0=
github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA=
github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ=
github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5/go.mod h1:XZKLFXbw13olxuztlWnmVUPYZp2a+BqzqhuMl0j/Ow8=
github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 h1:NeCKFxyOLpAaG4pJDk7+ewnCuV2IbXR7PggYSNujOno=
github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8/go.mod h1:ARGNnIZENIpDUVSX21JEQJKrESj/0u0r0iT61rpb86I=
+github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 h1:PUH/sUbJmBmHjNFNthJ/dW2+riFuJV0FhrGAwuUuRIg=
+github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1/go.mod h1:OmZeji3dNMwB1qldAlaQfcfJPc2BaZyweVGH7Ej4SJg=
+github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 h1:21oSj+TKlKTzQcxN9Hik7iSNNHPUQXN4s3itOnahy/w=
+github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0/go.mod h1:YaEZms1NgXFqGhObKVofcAeWXu2V+3t/BAXdHQZq7fU=
github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 h1:VDX+hu+qK3fbf2FodgG5kfh2h1bHK0FKirW1YqKWkRc=
github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1/go.mod h1:e69ohZtUePLLYNLXYgiOWp0FvRGg6ya/3fsq3o00nN0=
github.com/pulumi/pulumi-libvirt/sdk v0.4.7 h1:/BBnqqx/Gbg2vINvJxXIVb58THXzw2lSqFqxlRSXH9M=
@@ -425,8 +429,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.3 h1:nlN42MRSIuDh5Pc5nLq4b0lwZaX2ZU
github.com/pulumi/pulumi-random/sdk/v4 v4.16.3/go.mod h1:yRfWJSLEAVZvkwgXajr3S9OmFkAZTxfO44Ef2HfixXQ=
github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o=
github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng=
-github.com/pulumi/pulumi/sdk/v3 v3.129.0 h1:uZpTTwWTx7Mk8UT9FgatzxzArim47vZ6hzNCKvgvX6A=
-github.com/pulumi/pulumi/sdk/v3 v3.129.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY=
+github.com/pulumi/pulumi/sdk/v3 v3.131.0 h1:w6+XFt4ajz7ZEoCBFo+oMmrQ4DYYBKtzuj/zBe/uyoo=
+github.com/pulumi/pulumi/sdk/v3 v3.131.0/go.mod h1:J5kQEX8v87aeUhk6NdQXnjCo1DbiOnOiL3Sf2DuDda8=
github.com/pulumiverse/pulumi-time/sdk v0.0.17 h1:JNYVLglXeMAjyD3upIwKZ9o7MnNo7kc3FVsgxs7bc+A=
github.com/pulumiverse/pulumi-time/sdk v0.0.17/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@@ -448,8 +452,6 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
-github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -481,8 +483,6 @@ github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqa
github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
-github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68=
-github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7/go.mod h1:UxoP3EypF8JfGEjAII8jx1q8rQyDnX8qdTCs/UQBVIE=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/host.go b/test/new-e2e/pkg/environments/gcp/host/linux/host.go
new file mode 100644
index 0000000000000..0e479d8a51bdf
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/host/linux/host.go
@@ -0,0 +1,124 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package gcphost contains the definition of the GCP Host environment.
+package gcphost
+
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/test-infra-definitions/resources/gcp"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/components/datadog/updater"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+const (
+ provisionerBaseID = "gcp-vm-"
+ defaultVMName = "vm"
+)
+
+// Provisioner creates a VM environment with a VM, a FakeIntake and a Host Agent configured to talk to each other.
+// FakeIntake and Agent creation can be deactivated with the [WithoutFakeIntake] and [WithoutAgent] options.
+func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ // We need to build params here to be able to use params.name in the provisioner name
+ params := GetProvisionerParams(opts...)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Host) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times,
+ // and it's easy to forget about it, leading to hard-to-debug issues.
+ params := GetProvisionerParams(opts...)
+ return Run(ctx, env, RunParams{ProvisionerParams: params})
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// Run deploys an environment given a pulumi.Context
+func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error {
+ var gcpEnv gcp.Environment
+ if runParams.Environment == nil {
+ var err error
+ gcpEnv, err = gcp.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+ } else {
+ gcpEnv = *runParams.Environment
+ }
+ params := runParams.ProvisionerParams
+
+ host, err := compute.NewVM(gcpEnv, params.name, params.instanceOptions...)
+ if err != nil {
+ return err
+ }
+ err = host.Export(ctx, &env.RemoteHost.HostOutput)
+ if err != nil {
+ return err
+ }
+
+ // Create FakeIntake if required
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+
+ // Normally the Agent is enabled whenever the FakeIntake is, but guard just in case
+ if params.agentOptions != nil {
+ // Prepend in case it's overridden by the user
+ newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ }
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.FakeIntake = nil
+ }
+ if !params.installUpdater {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Updater = nil
+ }
+
+ // Create Agent if required
+ if params.installUpdater && params.agentOptions != nil {
+ updater, err := updater.NewHostUpdater(&gcpEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = updater.Export(ctx, &env.Updater.HostUpdaterOutput)
+ if err != nil {
+ return err
+ }
+ // TODO: add agent once the updater installs the agent on bootstrap
+ env.Agent = nil
+ } else if params.agentOptions != nil {
+ agent, err := agent.NewHostAgent(&gcpEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = agent.Export(ctx, &env.Agent.HostAgentOutput)
+ if err != nil {
+ return err
+ }
+
+ env.Agent.ClientOptions = params.agentClientOptions
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Agent = nil
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/params.go b/test/new-e2e/pkg/environments/gcp/host/linux/params.go
new file mode 100644
index 0000000000000..442fd28b889b0
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/host/linux/params.go
@@ -0,0 +1,152 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package gcphost
+
+import (
+ "fmt"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/resources/gcp"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+)
+
+// ProvisionerParams is a set of parameters for the Provisioner.
+type ProvisionerParams struct {
+ name string
+
+ instanceOptions []compute.VMOption
+ agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
+ fakeintakeOptions []fakeintake.Option
+ extraConfigParams runner.ConfigMap
+ installUpdater bool
+}
+
+func newProvisionerParams() *ProvisionerParams {
+ // We use nil slices to decide whether each component should be created
+ return &ProvisionerParams{
+ name: defaultVMName,
+ instanceOptions: []compute.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ extraConfigParams: runner.ConfigMap{},
+ }
+}
+
+// GetProvisionerParams returns a ProvisionerParams built from the given options
+func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := newProvisionerParams()
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a provisioner option.
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner.
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithInstanceOptions adds options to the GCP VM.
+func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.instanceOptions = append(params.instanceOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the Agent.
+func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = append(params.agentOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the FakeIntake.
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = append(params.fakeintakeOptions, opts...)
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the ConfigMap.
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WithoutFakeIntake disables the creation of the FakeIntake.
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithoutAgent disables the creation of the Agent.
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithUpdater installs the agent through the updater.
+func WithUpdater() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.installUpdater = true
+ return nil
+ }
+}
+
+// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options.
+func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+2)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option.
+func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// RunParams is a set of parameters for the Run function.
+type RunParams struct {
+ Environment *gcp.Environment
+ ProvisionerParams *ProvisionerParams
+}
diff --git a/test/new-e2e/pkg/runner/local_profile.go b/test/new-e2e/pkg/runner/local_profile.go
index 633e0ccf0972c..2cba95a568cd2 100644
--- a/test/new-e2e/pkg/runner/local_profile.go
+++ b/test/new-e2e/pkg/runner/local_profile.go
@@ -19,6 +19,7 @@ import (
var defaultLocalEnvironments = map[string]string{
"aws": "agent-sandbox",
"az": "agent-sandbox",
+ "gcp": "agent-sandbox",
}
// NewLocalProfile creates a new local profile
diff --git a/test/new-e2e/system-probe/connector/metric/metric.go b/test/new-e2e/system-probe/connector/metric/metric.go
index 4c674154402cd..dc90a56bfe270 100644
--- a/test/new-e2e/system-probe/connector/metric/metric.go
+++ b/test/new-e2e/system-probe/connector/metric/metric.go
@@ -15,6 +15,7 @@ import (
"os"
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)
@@ -41,3 +42,27 @@ func SubmitExecutionMetric(metricBody datadogV2.MetricPayload) error {
return nil
}
+
+// SubmitExecutionEvent accepts an event and sends it to Datadog.
+func SubmitExecutionEvent(eventBody datadogV1.EventCreateRequest) error {
+ if _, ok := os.LookupEnv("DD_API_KEY"); !ok {
+ fmt.Fprintf(os.Stderr, "skipping sending metric because DD_API_KEY not present")
+ return nil
+ }
+
+ ctx := datadog.NewDefaultContext(context.Background())
+ configuration := datadog.NewConfiguration()
+ apiClient := datadog.NewAPIClient(configuration)
+ api := datadogV1.NewEventsApi(apiClient)
+ resp, r, err := api.CreateEvent(ctx, eventBody)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+ return fmt.Errorf("error when calling `MetricsApi.SubmitMetrics`: %v", err)
+ }
+
+ responseContent, _ := json.MarshalIndent(resp, "", " ")
+ fmt.Fprintf(os.Stdout, "Response from `MetricsApi.SubmitMetrics`:\n%s\n", responseContent)
+
+ return nil
+}
diff --git a/test/new-e2e/system-probe/errors.go b/test/new-e2e/system-probe/errors.go
index 2fb1b57595e9a..91cb123602fe2 100644
--- a/test/new-e2e/system-probe/errors.go
+++ b/test/new-e2e/system-probe/errors.go
@@ -13,13 +13,16 @@ import (
"log"
"os"
"path"
+ "regexp"
"strings"
"time"
"github.com/DataDog/datadog-api-client-go/api/v1/datadog"
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
- "github.com/sethvargo/go-retry"
+ "github.com/pulumi/pulumi/sdk/v3/go/auto"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra"
"github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric"
)
@@ -27,6 +30,7 @@ const (
// bitmap of actions to take for an error
retryStack = 0x1 // 0b01
emitMetric = 0x2 // 0b10
+ changeAZ = 0x4 // 0b100
aria2cMissingStatusErrorStr = "error: wait: remote command exited without exit status or exit signal: running \" aria2c"
@@ -66,7 +70,7 @@ var handledErrorsLs = []handledError{
errorType: insufficientCapacityError,
errorString: "InsufficientInstanceCapacity",
metric: "insufficient-capacity",
- action: retryStack | emitMetric,
+ action: retryStack | emitMetric | changeAZ,
},
// Retry when ssh thinks aria2c exited without status. This may happen
// due to network connectivity issues if the ssh keepalive mechanism fails.
@@ -80,7 +84,7 @@ var handledErrorsLs = []handledError{
errorType: ec2StateChangeTimeoutError,
errorString: "timeout while waiting for state to become 'running'",
metric: "ec2-timeout-state-change",
- action: retryStack | emitMetric,
+ action: retryStack | emitMetric | changeAZ,
},
{
errorType: ioTimeout,
@@ -102,6 +106,15 @@ var handledErrorsLs = []handledError{
},
}
+type retryHandler struct {
+ currentAZ int
+ maxRetries int
+ retryDelay time.Duration
+ allErrors []error
+ configMap runner.ConfigMap
+ infraEnv string
+}
+
func errorMetric(errType string) datadogV2.MetricPayload {
tags := []string{
fmt.Sprintf("error:%s", errType),
@@ -123,15 +136,29 @@ func errorMetric(errType string) datadogV2.MetricPayload {
}
}
-func handleScenarioFailure(err error, changeRetryState func(handledError)) error {
+func (r *retryHandler) HandleError(err error, retryCount int) (infra.RetryType, []infra.GetStackOption) {
+ r.allErrors = append(r.allErrors, err)
+
+ if retryCount > r.maxRetries {
+ log.Printf("environment setup error: %v. Maximum number of retries (%d) exceeded, failing setup.\n", err, r.maxRetries)
+ return infra.NoRetry, nil
+ }
+
+ var newOpts []infra.GetStackOption
+ retry := infra.NoRetry
errStr := err.Error()
for _, e := range handledErrorsLs {
if !strings.Contains(errStr, e.errorString) {
continue
}
- // modify any state within the retry block
- changeRetryState(e)
+ if (e.action & changeAZ) != 0 {
+ r.currentAZ++
+ if az := getAvailabilityZone(r.infraEnv, r.currentAZ); az != "" {
+ r.configMap["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az}
+ newOpts = append(newOpts, infra.WithConfigMap(r.configMap))
+ }
+ }
if (e.action & emitMetric) != 0 {
submitError := metric.SubmitExecutionMetric(errorMetric(e.metric))
@@ -145,15 +172,19 @@ func handleScenarioFailure(err error, changeRetryState func(handledError)) error
}
if (e.action & retryStack) != 0 {
- log.Printf("environment setup error: %v. Retrying stack.\n", err)
- return retry.RetryableError(err)
+ retry = infra.ReUp
}
break
}
- log.Printf("environment setup error: %v. Failing stack.\n", err)
- return err
+ log.Printf("environment setup error. Retry strategy: %s.\n", retry)
+ if retry != infra.NoRetry {
+ log.Printf("waiting %s before retrying...\n", r.retryDelay)
+ time.Sleep(r.retryDelay)
+ }
+
+ return retry, newOpts
}
func storeErrorReasonForCITags(reason string) error {
@@ -177,3 +208,62 @@ func storeNumberOfRetriesForCITags(retries int) error {
_, err = f.WriteString(fmt.Sprintf("%d", retries))
return err
}
+
+type pulumiError struct {
+ command string
+ arch string
+ vmCommand string
+ errorMessage string
+ vmName string
+}
+
+var commandRegex = regexp.MustCompile(`^ command:remote:Command \(([^\)]+)\):$`)
+var archRegex = regexp.MustCompile(`distro_(arm64|x86_64)`)
+var vmCmdRegex = regexp.MustCompile(`-cmd-.+-ddvm-\d+-\d+-(.+)$`)
+var vmNameRegex = regexp.MustCompile(`-([^-]+)-distro`)
+
+func parsePulumiDiagnostics(message string) *pulumiError {
+ var perr pulumiError
+ lines := strings.Split(message, "\n")
+ inDiagnostics := false
+ for _, line := range lines {
+ if !inDiagnostics {
+ if line == "Diagnostics:" {
+ // diagnostics content starts on the next line
+ inDiagnostics = true
+ }
+ continue
+ }
+
+ if len(line) == 0 || line[0] != ' ' {
+ // Finished reading the diagnostics section; return the parsed error
+ return &perr
+ }
+
+ if perr.command == "" {
+ commandMatch := commandRegex.FindStringSubmatch(line)
+ if commandMatch != nil {
+ perr.command = commandMatch[1]
+
+ archMatch := archRegex.FindStringSubmatch(perr.command)
+ if archMatch != nil {
+ perr.arch = archMatch[1]
+ }
+
+ vmCmdMatch := vmCmdRegex.FindStringSubmatch(perr.command)
+ if vmCmdMatch != nil {
+ perr.vmCommand = vmCmdMatch[1]
+ }
+
+ vmNameMatch := vmNameRegex.FindStringSubmatch(perr.command)
+ if vmNameMatch != nil {
+ perr.vmName = vmNameMatch[1]
+ }
+ }
+ } else {
+ perr.errorMessage += strings.Trim(line, " ") + "\n"
+ }
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/system-probe/errors_test.go b/test/new-e2e/system-probe/errors_test.go
new file mode 100644
index 0000000000000..eeee52d43fb4f
--- /dev/null
+++ b/test/new-e2e/system-probe/errors_test.go
@@ -0,0 +1,105 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+package systemprobe
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const output = `
+Updating (gjulian-guillermo.julian-e2e-report-all-errors-ddvm):
+
+ pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running
+ pulumi:providers:random random
+@ updating....
+ dd:Host aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64
+ pulumi:providers:aws aws
+ pulumi:providers:command command
+ random:index:RandomShuffle aws-rnd-subnet
+ random:index:RandomString random-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-random-string-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192
+ command:local:Command local-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-gen-libvirt-sshkey
+ aws:ec2:Instance aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-wait-cloud-init
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-write-ssh-key
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-write-vol-xml
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-dir
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-allow-ssh-env
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-distro_arm64-download-with-curl
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-write-vol-xml
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-config
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-reload sshd
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-extract-base-volume-package
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-extract-base-volume-package
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-read-microvm-ssh-key
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-write-pool-xml
+ pulumi:providers:libvirt gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-libvirt-provider
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-define-libvirt-pool
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-build-libvirt-pool
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-start-libvirt-pool
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-refresh-libvirt-pool
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-build-libvirt-basevolume
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-build-libvirt-basevolume
+@ updating.....
+ libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192
+ libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-create-nvram
+ libvirt:index:Network gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-network-distro_arm64-arm64
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-tcp-100.1.0.0/24
+ libvirt:index:Domain arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192
+ command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-udp-100.1.0.0/24
+ + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (0s)
+@ updating.....
+ + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) bash: line 1: caca: command not found
+ + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker":
+ + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb **creating failed** error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker":
+ pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running error: update failed
+ pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm **failed** 1 error
+Diagnostics:
+ command:remote:Command (remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb):
+ error: Process exited with status 127: running " nocommand /mnt/docker && mount /dev/vdb /mnt/docker":
+ bash: line 1: nocommand: command not found
+
+ pulumi:pulumi:Stack (e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm):
+ error: update failed
+
+Outputs:
+ kmt-stack: (json) {
+ arm64: {
+ ip : "172.29.176.14"
+ microvms: [
+ [0]: {
+ id : "arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192"
+ ip : "100.1.0.2"
+ ssh-key-path: "/home/kernel-version-testing/ddvm_rsa"
+ tag : "ubuntu_22.04"
+ vmset-tags : [
+ [0]: "distro_arm64"
+ ]
+ }
+ ]
+ }
+ }
+
+
+Resources:
+ 36 unchanged
+
+Duration: 6s
+`
+
+func TestParseDiagnostics(t *testing.T) {
+ result := parsePulumiDiagnostics(output)
+ require.NotNil(t, result)
+ require.Equal(t, "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", result.command)
+ require.Equal(t, "arm64", result.arch)
+ require.Equal(t, "mount-disk-dev-vdb", result.vmCommand)
+ require.Equal(t, "error: Process exited with status 127: running \" nocommand /mnt/docker && mount /dev/vdb /mnt/docker\":\nbash: line 1: nocommand: command not found\n", result.errorMessage)
+ require.Equal(t, "ubuntu_22.04", result.vmName)
+}
diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go
index 5a7a00321707d..516f47d76244a 100644
--- a/test/new-e2e/system-probe/system-probe-test-env.go
+++ b/test/new-e2e/system-probe/system-probe-test-env.go
@@ -21,12 +21,13 @@ import (
"syscall"
"time"
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
"github.com/DataDog/test-infra-definitions/scenarios/aws/microVMs/microvms"
- "github.com/sethvargo/go-retry"
"golang.org/x/term"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra"
+ "github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric"
"github.com/pulumi/pulumi/sdk/v3/go/auto"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
@@ -176,6 +177,10 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (*
return nil, fmt.Errorf("No API Key for datadog-agent provided")
}
+ ciJob := getEnv("CI_JOB_ID", "")
+ ciPipeline := getEnv("CI_PIPELINE_ID", "")
+ ciBranch := getEnv("CI_COMMIT_REF_NAME", "")
+
var customAMILocalWorkingDir string
// Remote AMI working dir is always on Linux
@@ -239,60 +244,43 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (*
config["ddinfra:extraResourcesTags"] = auto.ConfigValue{Value: envVars}
}
- var upResult auto.UpResult
- var pulumiStack *auto.Stack
- ctx := context.Background()
- currentAZ := 0 // PrimaryAZ
- b := retry.NewConstant(3 * time.Second)
- // Retry 4 times. This allows us to cycle through all AZs, and handle libvirt
- // connection issues in the worst case.
- b = retry.WithMaxRetries(4, b)
- numRetries := 0
- retryErr := retry.Do(ctx, b, func(_ context.Context) error {
- if az := getAvailabilityZone(opts.InfraEnv, currentAZ); az != "" {
- config["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az}
- }
-
- pulumiStack, upResult, err = stackManager.GetStackNoDeleteOnFailure(
- systemProbeTestEnv.context,
- systemProbeTestEnv.name,
- func(ctx *pulumi.Context) error {
- if err := microvms.Run(ctx); err != nil {
- return fmt.Errorf("setup micro-vms in remote instance: %w", err)
- }
- return nil
- },
- infra.WithFailOnMissing(opts.FailOnMissing),
- infra.WithConfigMap(config),
- )
+ retryHandler := retryHandler{
+ currentAZ: 0,
+ maxRetries: 4,
+ retryDelay: 3 * time.Second,
+ configMap: config,
+ infraEnv: opts.InfraEnv,
+ }
- if err != nil {
- numRetries++
- return handleScenarioFailure(err, func(possibleError handledError) {
- // handle the following errors by trying in a different availability zone
- if possibleError.errorType == insufficientCapacityError ||
- possibleError.errorType == ec2StateChangeTimeoutError {
- currentAZ++
- }
- })
- }
+ stackManager.RetryStrategy = retryHandler.HandleError
+ pulumiStack, upResult, pulumiErr := stackManager.GetStackNoDeleteOnFailure(
+ systemProbeTestEnv.context,
+ systemProbeTestEnv.name,
+ func(ctx *pulumi.Context) error {
+ if err := microvms.Run(ctx); err != nil {
+ return fmt.Errorf("setup micro-vms in remote instance: %w", err)
+ }
+ return nil
+ },
+ infra.WithFailOnMissing(opts.FailOnMissing),
+ infra.WithConfigMap(config),
+ )
+ if pulumiErr == nil {
// Mark the test as successful, just in case we succeeded after a retry
- err = storeErrorReasonForCITags("")
+ err := storeErrorReasonForCITags("")
if err != nil {
log.Printf("failed to store error reason for CI tags: %v", err)
}
+ }
- return nil
- })
-
- err = storeNumberOfRetriesForCITags(numRetries)
+ err = storeNumberOfRetriesForCITags(len(retryHandler.allErrors))
if err != nil {
log.Printf("failed to store number of retries for CI tags: %v", err)
}
outputs := upResult.Outputs
- if retryErr != nil {
+ if pulumiErr != nil {
// pulumi does not populate `UpResult` with the stack output if the
// update process failed. In this case we must manually fetch the outputs.
outputs, err = pulumiStack.Outputs(context.Background())
@@ -305,8 +293,47 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (*
if err != nil {
err = fmt.Errorf("failed to write stack output to file: %w", err)
}
- if retryErr != nil {
- return nil, errors.Join(fmt.Errorf("failed to create stack: %w", retryErr), err)
+ if pulumiErr != nil {
+ for i, retryErr := range retryHandler.allErrors {
+ pulumiError := parsePulumiDiagnostics(retryErr.Error())
+ if pulumiError != nil {
+ log.Printf("pulumi error on retry %d:\n\tcommand: %s\n\tvm-command: %s\n\terror message:\n%s\n\n", i, pulumiError.command, pulumiError.vmCommand, pulumiError.errorMessage)
+
+ // Send the error as a DD event so we can track it
+ event := datadogV1.EventCreateRequest{
+ Title: "[KMT] Environment setup error",
+ Text: pulumiError.errorMessage,
+ Tags: []string{
+ "test:kmt",
+ "source:pulumi",
+ "repository:datadog/datadog-agent",
+ "team:ebpf-platform",
+ fmt.Sprintf("vm.name:%s", pulumiError.vmName),
+ fmt.Sprintf("vm.arch:%s", pulumiError.arch),
+ fmt.Sprintf("vm.command:%s", pulumiError.vmCommand),
+ },
+ }
+
+ if ciJob != "" {
+ event.Tags = append(event.Tags, fmt.Sprintf("ci.job.id:%s", ciJob))
+ }
+
+ if ciPipeline != "" {
+ event.Tags = append(event.Tags, fmt.Sprintf("ci.pipeline.id:%s", ciPipeline))
+ }
+
+ if ciBranch != "" {
+ event.Tags = append(event.Tags, fmt.Sprintf("ci.branch:%s", ciBranch))
+ }
+
+ if err = metric.SubmitExecutionEvent(event); err != nil {
+ log.Printf("failed to submit environment setup error event: %v", err)
+ }
+ } else {
+ log.Printf("unknown/couldn't parse error on retry %d", i)
+ }
+ }
+ return nil, errors.Join(fmt.Errorf("failed to create stack: %w", pulumiErr), err)
}
systemProbeTestEnv.StackOutput = upResult
diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go
index 23aaedb2f92bd..14b22696bf37f 100644
--- a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go
+++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go
@@ -82,8 +82,13 @@ func (s *LinuxJournaldFakeintakeSuite) journaldLogCollection() {
_, err := s.Env().RemoteHost.Execute("sudo usermod -a -G systemd-journal dd-agent")
require.NoErrorf(t, err, "Unable to adjust permissions for dd-agent user: %s", err)
- // Restart agent
- s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent")
+ // Restart agent and make sure it's ready before adding logs
+ _, err = s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent")
+ assert.NoErrorf(t, err, "Failed to restart the agent: %s", err)
+ s.EventuallyWithT(func(c *assert.CollectT) {
+ agentReady := s.Env().Agent.Client.IsReady()
+ assert.True(c, agentReady)
+ }, 1*time.Minute, 5*time.Second, "Agent was not ready")
// Generate log
appendJournaldLog(s, "hello-world", 1)
diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go
index 2ede187079619..d48cc30962b7c 100644
--- a/test/new-e2e/tests/discovery/linux_test.go
+++ b/test/new-e2e/tests/discovery/linux_test.go
@@ -29,9 +29,6 @@ var agentConfigStr string
//go:embed testdata/config/system_probe_config.yaml
var systemProbeConfigStr string
-//go:embed testdata/config/check_config.yaml
-var checkConfigStr string
-
type linuxTestSuite struct {
e2e.BaseSuite[environments.Host]
}
@@ -42,7 +39,6 @@ func TestLinuxTestSuite(t *testing.T) {
agentParams := []func(*agentparams.Params) error{
agentparams.WithAgentConfig(agentConfigStr),
agentparams.WithSystemProbeConfig(systemProbeConfigStr),
- agentparams.WithFile("/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml", checkConfigStr, true),
}
options := []e2e.SuiteOption{
e2e.WithProvisioner(awshost.Provisioner(awshost.WithAgentOptions(agentParams...))),
@@ -92,28 +88,40 @@ func (s *linuxTestSuite) TestServiceDiscoveryCheck() {
found := foundMap["json-server"]
if assert.NotNil(c, found) {
assert.Equal(c, "none", found.Payload.APMInstrumentation)
- assert.Equal(c, "generated", found.Payload.ServiceNameSource)
+ assert.Equal(c, "json-server", found.Payload.ServiceName)
+ assert.Equal(c, "json-server", found.Payload.GeneratedServiceName)
+ assert.Empty(c, found.Payload.DDService)
+ assert.Empty(c, found.Payload.ServiceNameSource)
assert.NotZero(c, found.Payload.RSSMemory)
}
found = foundMap["node-instrumented"]
if assert.NotNil(c, found) {
assert.Equal(c, "provided", found.Payload.APMInstrumentation)
- assert.Equal(c, "generated", found.Payload.ServiceNameSource)
+ assert.Equal(c, "node-instrumented", found.Payload.ServiceName)
+ assert.Equal(c, "node-instrumented", found.Payload.GeneratedServiceName)
+ assert.Empty(c, found.Payload.DDService)
+ assert.Empty(c, found.Payload.ServiceNameSource)
assert.NotZero(c, found.Payload.RSSMemory)
}
- found = foundMap["python.server"]
+ found = foundMap["python-svc-dd"]
if assert.NotNil(c, found) {
assert.Equal(c, "none", found.Payload.APMInstrumentation)
- assert.Equal(c, "generated", found.Payload.ServiceNameSource)
+ assert.Equal(c, "python-svc-dd", found.Payload.ServiceName)
+ assert.Equal(c, "python.server", found.Payload.GeneratedServiceName)
+ assert.Equal(c, "python-svc-dd", found.Payload.DDService)
+ assert.Equal(c, "provided", found.Payload.ServiceNameSource)
assert.NotZero(c, found.Payload.RSSMemory)
}
found = foundMap["python.instrumented"]
if assert.NotNil(c, found) {
assert.Equal(c, "provided", found.Payload.APMInstrumentation)
- assert.Equal(c, "generated", found.Payload.ServiceNameSource)
+ assert.Equal(c, "python.instrumented", found.Payload.ServiceName)
+ assert.Equal(c, "python.instrumented", found.Payload.GeneratedServiceName)
+ assert.Empty(c, found.Payload.DDService)
+ assert.Empty(c, found.Payload.ServiceNameSource)
assert.NotZero(c, found.Payload.RSSMemory)
}
diff --git a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml b/test/new-e2e/tests/discovery/testdata/config/check_config.yaml
deleted file mode 100644
index acab3a6421cab..0000000000000
--- a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml
+++ /dev/null
@@ -1 +0,0 @@
-instances: [{}]
diff --git a/test/new-e2e/tests/discovery/testdata/provision/provision.sh b/test/new-e2e/tests/discovery/testdata/provision/provision.sh
index 9d3c61f06a425..93bb7a0deeec1 100755
--- a/test/new-e2e/tests/discovery/testdata/provision/provision.sh
+++ b/test/new-e2e/tests/discovery/testdata/provision/provision.sh
@@ -33,6 +33,7 @@ install_systemd_unit () {
name=$1
command=$2
port=$3
+ extraenv=$4
cat > "/etc/systemd/system/${name}.service" <<- EOM
[Unit]
@@ -48,6 +49,7 @@ User=root
ExecStart=${command}
Environment="PORT=${port}"
Environment="NODE_VERSION=20"
+Environment="${extraenv}"
[Install]
WantedBy=multi-user.target
@@ -55,12 +57,12 @@ EOM
}
# Node
-install_systemd_unit "node-json-server" "$NVM_DIR/nvm-exec npx json-server --port 8084 /home/ubuntu/e2e-test/node/json-server/db.json" "8084"
-install_systemd_unit "node-instrumented" "$NVM_DIR/nvm-exec node /home/ubuntu/e2e-test/node/instrumented/server.js" "8085"
+install_systemd_unit "node-json-server" "$NVM_DIR/nvm-exec npx json-server --port 8084 /home/ubuntu/e2e-test/node/json-server/db.json" "8084" ""
+install_systemd_unit "node-instrumented" "$NVM_DIR/nvm-exec node /home/ubuntu/e2e-test/node/instrumented/server.js" "8085" ""
# Python
-install_systemd_unit "python-svc" "/usr/bin/python3 /home/ubuntu/e2e-test/python/server.py" "8082"
-install_systemd_unit "python-instrumented" "/usr/bin/python3 /home/ubuntu/e2e-test/python/instrumented.py" "8083"
+install_systemd_unit "python-svc" "/usr/bin/python3 /home/ubuntu/e2e-test/python/server.py" "8082" "DD_SERVICE=python-svc-dd"
+install_systemd_unit "python-instrumented" "/usr/bin/python3 /home/ubuntu/e2e-test/python/instrumented.py" "8083" ""
systemctl daemon-reload
diff --git a/test/new-e2e/tests/installer/windows/base_suite.go b/test/new-e2e/tests/installer/windows/base_suite.go
index 3d9a9307c29c6..2daac352d8e14 100644
--- a/test/new-e2e/tests/installer/windows/base_suite.go
+++ b/test/new-e2e/tests/installer/windows/base_suite.go
@@ -6,7 +6,6 @@
package installer
import (
- "fmt"
agentVersion "github.com/DataDog/datadog-agent/pkg/version"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
@@ -113,7 +112,7 @@ func (s *BaseInstallerSuite) BeforeTest(suiteName, testName string) {
outputDir, err := runner.GetTestOutputDir(runner.GetProfile(), s.T())
s.Require().NoError(err, "should get output dir")
s.T().Logf("Output dir: %s", outputDir)
- s.installer = NewDatadogInstaller(s.Env(), fmt.Sprintf("%s/install.log", outputDir))
+ s.installer = NewDatadogInstaller(s.Env(), outputDir)
}
// Require instantiates a suiteAssertions for the current suite.
diff --git a/test/new-e2e/tests/installer/windows/datadog_installer.go b/test/new-e2e/tests/installer/windows/datadog_installer.go
index e246a29a5bbd1..28e7e186d20a3 100644
--- a/test/new-e2e/tests/installer/windows/datadog_installer.go
+++ b/test/new-e2e/tests/installer/windows/datadog_installer.go
@@ -49,16 +49,20 @@ var (
type DatadogInstaller struct {
binaryPath string
env *environments.WindowsHost
- logPath string
+ outputDir string
}
// NewDatadogInstaller instantiates a new instance of the Datadog Installer running
// on a remote Windows host.
-func NewDatadogInstaller(env *environments.WindowsHost, logPath string) *DatadogInstaller {
+func NewDatadogInstaller(env *environments.WindowsHost, outputDir string) *DatadogInstaller {
+ if outputDir == "" {
+ outputDir = os.TempDir()
+ }
+
return &DatadogInstaller{
binaryPath: path.Join(Path, BinaryName),
env: env,
- logPath: logPath,
+ outputDir: outputDir,
}
}
@@ -130,8 +134,9 @@ func (d *DatadogInstaller) RemoveExperiment(packageName string) (string, error)
// Params contains the optional parameters for the Datadog Installer Install command
type Params struct {
- installerURL string
- msiArgs []string
+ installerURL string
+ msiArgs []string
+ msiLogFilename string
}
// Option is an optional function parameter type for the Datadog Installer Install command
@@ -153,6 +158,14 @@ func WithMSIArg(arg string) Option {
}
}
+// WithMSILogFile sets the filename for the MSI log file, to be stored in the output directory.
+func WithMSILogFile(filename string) Option {
+ return func(params *Params) error {
+ params.msiLogFilename = filename
+ return nil
+ }
+}
+
// WithInstallerURLFromInstallersJSON uses a specific URL for the Datadog Installer from an installers_v2.json
// file.
// bucket: The S3 bucket to look for the installers_v2.json file, i.e. "dd-agent-mstesting"
@@ -176,7 +189,9 @@ func WithInstallerURLFromInstallersJSON(bucket, channel, version string) Option
// Install will attempt to install the Datadog Installer on the remote host.
// By default, it will use the installer from the current pipeline.
func (d *DatadogInstaller) Install(opts ...Option) error {
- params := Params{}
+ params := Params{
+ msiLogFilename: "install.log",
+ }
err := optional.ApplyOptions(&params, opts)
if err != nil {
return nil
@@ -201,9 +216,9 @@ func (d *DatadogInstaller) Install(opts ...Option) error {
params.installerURL = artifactURL
msiPath = params.installerURL
}
- logPath := d.logPath
- if logPath == "" {
- logPath = filepath.Join(os.TempDir(), "install.log")
+ logPath := filepath.Join(d.outputDir, params.msiLogFilename)
+ if _, err := os.Stat(logPath); err == nil {
+ return fmt.Errorf("log file %s already exists", logPath)
}
msiArgs := ""
if params.msiArgs != nil {
@@ -214,7 +229,9 @@ func (d *DatadogInstaller) Install(opts ...Option) error {
// Uninstall will attempt to uninstall the Datadog Installer on the remote host.
func (d *DatadogInstaller) Uninstall(opts ...Option) error {
- params := Params{}
+ params := Params{
+ msiLogFilename: "uninstall.log",
+ }
err := optional.ApplyOptions(&params, opts)
if err != nil {
return nil
@@ -225,9 +242,9 @@ func (d *DatadogInstaller) Uninstall(opts ...Option) error {
return err
}
- logPath := d.logPath
- if logPath == "" {
- logPath = filepath.Join(os.TempDir(), "uninstall.log")
+ logPath := filepath.Join(d.outputDir, params.msiLogFilename)
+ if _, err := os.Stat(logPath); err == nil {
+ return fmt.Errorf("log file %s already exists", logPath)
}
msiArgs := ""
if params.msiArgs != nil {
diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go
index 240fcc1035f51..de9df45e634d8 100644
--- a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go
+++ b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go
@@ -24,7 +24,9 @@ func (s *baseInstallerSuite) freshInstall() {
// Arrange
// Act
- s.Require().NoError(s.Installer().Install())
+ s.Require().NoError(s.Installer().Install(
+ installerwindows.WithMSILogFile("fresh-install.log"),
+ ))
// Assert
s.requireInstalled()
diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go
index 25f3df70dca0b..c4109055ae218 100644
--- a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go
+++ b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go
@@ -67,7 +67,9 @@ func (s *testInstallerSuite) installWithExistingConfigFile() {
// Arrange
// Act
- s.Require().NoError(s.Installer().Install())
+ s.Require().NoError(s.Installer().Install(
+ installerwindows.WithMSILogFile("with-config-install.log"),
+ ))
// Assert
s.requireInstalled()
@@ -82,7 +84,9 @@ func (s *testInstallerSuite) repair() {
s.Require().NoError(s.Env().RemoteHost.Remove(installerwindows.BinaryPath))
// Act
- s.Require().NoError(s.Installer().Install())
+ s.Require().NoError(s.Installer().Install(
+ installerwindows.WithMSILogFile("repair.log"),
+ ))
// Assert
s.requireInstalled()
diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go
index 9b2dadb31de29..b211dce84ead1 100644
--- a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go
+++ b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go
@@ -33,7 +33,10 @@ func (s *testInstallerRollbackSuite) installRollback() {
// Arrange
// Act
- msiErr := s.Installer().Install(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"))
+ msiErr := s.Installer().Install(
+ installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"),
+ installerwindows.WithMSILogFile("install-rollback.log"),
+ )
s.Require().Error(msiErr)
// Assert
@@ -45,7 +48,10 @@ func (s *testInstallerRollbackSuite) uninstallRollback() {
// Arrange
// Act
- msiErr := s.Installer().Uninstall(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"))
+ msiErr := s.Installer().Uninstall(
+ installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"),
+ installerwindows.WithMSILogFile("uninstall-rollback.log"),
+ )
s.Require().Error(msiErr)
// Assert
diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
index e9925be832a49..d4db167b2e2c6 100644
--- a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
+++ b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
@@ -27,7 +27,10 @@ func TestInstallerUpgrades(t *testing.T) {
// TestUpgrades tests upgrading the stable version of the Datadog installer to the latest from the pipeline.
func (s *testInstallerUpgradesSuite) TestUpgrades() {
// Arrange
- s.Require().NoError(s.Installer().Install(installerwindows.WithInstallerURLFromInstallersJSON(pipeline.AgentS3BucketTesting, pipeline.StableChannel, s.StableInstallerVersion().PackageVersion())))
+ s.Require().NoError(s.Installer().Install(
+ installerwindows.WithInstallerURLFromInstallersJSON(pipeline.AgentS3BucketTesting, pipeline.StableChannel, s.StableInstallerVersion().PackageVersion()),
+ installerwindows.WithMSILogFile("install.log"),
+ ))
// sanity check: make sure we did indeed install the stable version
s.Require().Host(s.Env().RemoteHost).
HasBinary(installerwindows.BinaryPath).
@@ -36,7 +39,9 @@ func (s *testInstallerUpgradesSuite) TestUpgrades() {
// Act
// Install "latest" from the pipeline
- s.Require().NoError(s.Installer().Install())
+ s.Require().NoError(s.Installer().Install(
+ installerwindows.WithMSILogFile("upgrade.log"),
+ ))
// Assert
s.Require().Host(s.Env().RemoteHost).
diff --git a/test/new-e2e/tests/otel/otel_test.go b/test/new-e2e/tests/otel/otel_test.go
index d67c79aefcd90..1a00bf90d9e51 100644
--- a/test/new-e2e/tests/otel/otel_test.go
+++ b/test/new-e2e/tests/otel/otel_test.go
@@ -38,6 +38,7 @@ type linuxTestSuite struct {
var collectorConfig string
func TestOTel(t *testing.T) {
+ t.Parallel()
e2e.Run(t, &linuxTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(collectorConfig)))))
}
diff --git a/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go
new file mode 100644
index 0000000000000..f15c74e858220
--- /dev/null
+++ b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go
@@ -0,0 +1,148 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package otel contains e2e tests for the OTLP ingest pipelines
+
+package otel
+
+import (
+ "context"
+ _ "embed"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ "github.com/stretchr/testify/assert"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/DataDog/datadog-agent/test/fakeintake/aggregator"
+ fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes"
+)
+
+type otelIngestTestSuite struct {
+ e2e.BaseSuite[environments.Kubernetes]
+}
+
+func TestOTelIngest(t *testing.T) {
+ values := `
+datadog:
+ otlp:
+ receiver:
+ protocols:
+ grpc:
+ enabled: true
+ logs:
+ enabled: true
+`
+ t.Parallel()
+ e2e.Run(t, &otelIngestTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values)))))
+}
+
+func (s *otelIngestTestSuite) TestOTLPTraces() {
+ ctx := context.Background()
+ s.Env().FakeIntake.Client().FlushServerAndResetAggregators()
+ service := "telemetrygen-job"
+ numTraces := 10
+
+ s.T().Log("Starting telemetrygen")
+ s.createTelemetrygenJob(ctx, "traces", []string{"--service", service, "--traces", fmt.Sprint(numTraces)})
+
+ s.T().Log("Waiting for traces")
+ s.EventuallyWithT(func(c *assert.CollectT) {
+ traces, err := s.Env().FakeIntake.Client().GetTraces()
+ assert.NoError(c, err)
+ assert.NotEmpty(c, traces)
+ trace := traces[0]
+ assert.Equal(c, "none", trace.Env)
+ assert.NotEmpty(c, trace.TracerPayloads)
+ tp := trace.TracerPayloads[0]
+ assert.NotEmpty(c, tp.Chunks)
+ assert.NotEmpty(c, tp.Chunks[0].Spans)
+ spans := tp.Chunks[0].Spans
+ for _, sp := range spans {
+ assert.Equal(c, service, sp.Service)
+ assert.Equal(c, "telemetrygen", sp.Meta["otel.library.name"])
+ }
+ }, 2*time.Minute, 10*time.Second)
+}
+
+func (s *otelIngestTestSuite) TestOTLPMetrics() {
+ ctx := context.Background()
+ s.Env().FakeIntake.Client().FlushServerAndResetAggregators()
+ service := "telemetrygen-job"
+ serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service)
+ numMetrics := 10
+
+ s.T().Log("Starting telemetrygen")
+ s.createTelemetrygenJob(ctx, "metrics", []string{"--metrics", fmt.Sprint(numMetrics), "--otlp-attributes", serviceAttribute})
+
+ s.T().Log("Waiting for metrics")
+ s.EventuallyWithT(func(c *assert.CollectT) {
+ serviceTag := "service:" + service
+ metrics, err := s.Env().FakeIntake.Client().FilterMetrics("gen", fakeintake.WithTags[*aggregator.MetricSeries]([]string{serviceTag}))
+ assert.NoError(c, err)
+ assert.NotEmpty(c, metrics)
+ }, 2*time.Minute, 10*time.Second)
+}
+
+func (s *otelIngestTestSuite) TestOTLPLogs() {
+ ctx := context.Background()
+ s.Env().FakeIntake.Client().FlushServerAndResetAggregators()
+ service := "telemetrygen-job"
+ serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service)
+ numLogs := 10
+ logBody := "telemetrygen log"
+
+ s.T().Log("Starting telemetrygen")
+ s.createTelemetrygenJob(ctx, "logs", []string{"--logs", fmt.Sprint(numLogs), "--otlp-attributes", serviceAttribute, "--body", logBody})
+
+ s.T().Log("Waiting for logs")
+ s.EventuallyWithT(func(c *assert.CollectT) {
+ logs, err := s.Env().FakeIntake.Client().FilterLogs(service)
+ assert.NoError(c, err)
+ assert.NotEmpty(c, logs)
+ for _, log := range logs {
+ assert.Contains(c, log.Message, logBody)
+ }
+ }, 2*time.Minute, 10*time.Second)
+}
+
+func (s *otelIngestTestSuite) createTelemetrygenJob(ctx context.Context, telemetry string, options []string) {
+ var ttlSecondsAfterFinished int32 = 600 //nolint:revive // We want to show this is explicitly set
+ var backOffLimit int32 = 4
+
+ otlpEndpoint := fmt.Sprintf("%v:4317", s.Env().Agent.LinuxNodeAgent.LabelSelectors["app"])
+ jobSpec := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("telemetrygen-job-%v", telemetry),
+ Namespace: "datadog",
+ },
+ Spec: batchv1.JobSpec{
+ TTLSecondsAfterFinished: &ttlSecondsAfterFinished,
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "telemetrygen-job",
+ Image: "ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest",
+ Command: append([]string{"/telemetrygen", telemetry, "--otlp-endpoint", otlpEndpoint, "--otlp-insecure"}, options...),
+ },
+ },
+ RestartPolicy: corev1.RestartPolicyNever,
+ },
+ },
+ BackoffLimit: &backOffLimit,
+ },
+ }
+
+ _, err := s.Env().KubernetesCluster.Client().BatchV1().Jobs("datadog").Create(ctx, jobSpec, metav1.CreateOptions{})
+ assert.NoError(s.T(), err, "Could not properly start job")
+}
diff --git a/test/new-e2e/tests/process/ecs_test.go b/test/new-e2e/tests/process/ecs_test.go
index 84ee94ec56d2d..10c42d696a512 100644
--- a/test/new-e2e/tests/process/ecs_test.go
+++ b/test/new-e2e/tests/process/ecs_test.go
@@ -6,6 +6,7 @@
package process
import (
+ "fmt"
"testing"
"time"
@@ -25,7 +26,7 @@ import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/ecs"
)
-type ECSSuite struct {
+type ECSEC2Suite struct {
e2e.BaseSuite[ecsCPUStressEnv]
}
@@ -33,7 +34,7 @@ type ecsCPUStressEnv struct {
environments.ECS
}
-func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] {
+func ecsEC2CPUStressProvisioner(runInCoreAgent bool) e2e.PulumiEnvRunFunc[ecsCPUStressEnv] {
return func(ctx *pulumi.Context, env *ecsCPUStressEnv) error {
awsEnv, err := aws.NewEnvironment(ctx)
if err != nil {
@@ -45,6 +46,7 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] {
ecs.WithECSLinuxECSOptimizedNodeGroup(),
ecs.WithAgentOptions(
ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED", "true"),
+ ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED", fmt.Sprintf("%t", runInCoreAgent)),
),
ecs.WithWorkloadApp(func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error) {
return cpustress.EcsAppDefinition(e, clusterArn)
@@ -59,16 +61,16 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] {
}
}
-func TestECSTestSuite(t *testing.T) {
+func TestECSEC2TestSuite(t *testing.T) {
t.Parallel()
- s := ECSSuite{}
+ s := ECSEC2Suite{}
e2eParams := []e2e.SuiteOption{e2e.WithProvisioner(
- e2e.NewTypedPulumiProvisioner("ecsCPUStress", ecsCPUStressProvisioner(), nil))}
+ e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", ecsEC2CPUStressProvisioner(false), nil))}
e2e.Run(t, &s, e2eParams...)
}
-func (s *ECSSuite) TestECSProcessCheck() {
+func (s *ECSEC2Suite) TestProcessCheck() {
t := s.T()
// PROCS-4219
flake.Mark(t)
@@ -86,3 +88,28 @@ func (s *ECSSuite) TestECSProcessCheck() {
assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]")
assertContainersCollected(t, payloads, []string{"stress-ng"})
}
+
+func (s *ECSEC2Suite) TestProcessCheckInCoreAgent() {
+ t := s.T()
+ // PROCS-4219
+ flake.Mark(t)
+
+ s.UpdateEnv(e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", ecsEC2CPUStressProvisioner(true), nil))
+
+ // Flush fake intake to remove any payloads which may have already been collected
+ s.Env().FakeIntake.Client().FlushServerAndResetAggregators()
+
+ var payloads []*aggregator.ProcessPayload
+ assert.EventuallyWithT(t, func(c *assert.CollectT) {
+ var err error
+ payloads, err = s.Env().FakeIntake.Client().GetProcesses()
+ assert.NoError(c, err, "failed to get process payloads from fakeintake")
+
+ // Wait for two payloads, as processes must be detected in two check runs to be returned
+ assert.GreaterOrEqual(c, len(payloads), 2, "fewer than 2 payloads returned")
+ }, 2*time.Minute, 10*time.Second)
+
+ assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]")
+ requireProcessNotCollected(t, payloads, "process-agent")
+ assertContainersCollected(t, payloads, []string{"stress-ng"})
+}
diff --git a/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml
new file mode 100644
index 0000000000000..bbfbac7b00d97
--- /dev/null
+++ b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml
@@ -0,0 +1,8 @@
+# enable NPM
+network_config:
+ enabled: true
+
+# enable security agent
+runtime_security_config:
+ enabled: true
+ fim_enabled: false
diff --git a/test/new-e2e/tests/windows/service-test/startstop_test.go b/test/new-e2e/tests/windows/service-test/startstop_test.go
index 14bd97dbabfbd..8d7a8f6dacb87 100644
--- a/test/new-e2e/tests/windows/service-test/startstop_test.go
+++ b/test/new-e2e/tests/windows/service-test/startstop_test.go
@@ -36,13 +36,28 @@ var agentConfig string
//go:embed fixtures/system-probe.yaml
var systemProbeConfig string
+//go:embed fixtures/system-probe-nofim.yaml
+var systemProbeNoFIMConfig string
+
//go:embed fixtures/security-agent.yaml
var securityAgentConfig string
+// TestNoFIMServiceBehaviorAgentCommand tests the service behavior, with FIM disabled, when controlled by Agent commands
+func TestNoFIMServiceBehaviorAgentCommand(t *testing.T) {
+ s := &agentServiceCommandSuite{}
+ run(t, s, systemProbeNoFIMConfig)
+}
+
+// TestNoFIMServiceBehaviorPowerShell tests the service behavior, with FIM disabled, when controlled by PowerShell commands
+func TestNoFIMServiceBehaviorPowerShell(t *testing.T) {
+ s := &powerShellServiceCommandSuite{}
+ run(t, s, systemProbeNoFIMConfig)
+}
+
// TestServiceBehaviorAgentCommand tests the service behavior when controlled by Agent commands
func TestServiceBehaviorAgentCommand(t *testing.T) {
s := &agentServiceCommandSuite{}
- run(t, s)
+ run(t, s, systemProbeConfig)
}
type agentServiceCommandSuite struct {
@@ -78,7 +93,7 @@ func (s *agentServiceCommandSuite) SetupSuite() {
// TestServiceBehaviorPowerShell tests the service behavior when controlled by PowerShell commands
func TestServiceBehaviorPowerShell(t *testing.T) {
s := &powerShellServiceCommandSuite{}
- run(t, s)
+ run(t, s, systemProbeConfig)
}
type powerShellServiceCommandSuite struct {
@@ -204,7 +219,7 @@ func (s *powerShellServiceCommandSuite) TestHardExitEventLogEntry() {
}, 1*time.Minute, 1*time.Second, "should have hard exit messages in the event log")
}
-func run[Env any](t *testing.T, s e2e.Suite[Env]) {
+func run[Env any](t *testing.T, s e2e.Suite[Env], systemProbeConfig string) {
opts := []e2e.SuiteOption{e2e.WithProvisioner(awsHostWindows.ProvisionerNoFakeIntake(
awsHostWindows.WithAgentOptions(
agentparams.WithAgentConfig(agentConfig),
diff --git a/test/otel/go.mod b/test/otel/go.mod
index 1eaab0c1aa48e..1a1e56a250a8a 100644
--- a/test/otel/go.mod
+++ b/test/otel/go.mod
@@ -171,7 +171,7 @@ require (
github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
- github.com/DataDog/go-sqllexer v0.0.13 // indirect
+ github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 // indirect
diff --git a/test/otel/go.sum b/test/otel/go.sum
index e60a093490cb1..cdf2e4f5f136d 100644
--- a/test/otel/go.sum
+++ b/test/otel/go.sum
@@ -8,8 +8,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM=
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc=
-github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak=
-github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
+github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
+github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ=
diff --git a/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml b/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml
index 88543e76b074d..7c77a5477d6a5 100644
--- a/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml
+++ b/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml
@@ -6,6 +6,7 @@ auth_token_file_path: /tmp/agent-auth-token
cloud_provider_metadata: []
telemetry.enabled: true
+telemetry.checks: '*'
dd_url: http://localhost:9091
process_config.process_dd_url: http://localhost:9092
diff --git a/test/regression/cases/file_tree/datadog-agent/datadog.yaml b/test/regression/cases/file_tree/datadog-agent/datadog.yaml
index 2d30e86cc40d5..986d0adcf6d84 100644
--- a/test/regression/cases/file_tree/datadog-agent/datadog.yaml
+++ b/test/regression/cases/file_tree/datadog-agent/datadog.yaml
@@ -9,4 +9,5 @@ logs_enabled: true
dd_url: http://127.0.0.1:9092
telemetry.enabled: true
+telemetry.checks: '*'
process_collection.enabled: false
diff --git a/test/regression/cases/idle/datadog-agent/datadog.yaml b/test/regression/cases/idle/datadog-agent/datadog.yaml
index 3b61ada83da6e..1b9b15d83f17a 100644
--- a/test/regression/cases/idle/datadog-agent/datadog.yaml
+++ b/test/regression/cases/idle/datadog-agent/datadog.yaml
@@ -9,3 +9,4 @@ process_config.process_dd_url: http://localhost:9092
cloud_provider_metadata: []
telemetry.enabled: true
+telemetry.checks: '*'
diff --git a/test/regression/cases/idle/experiment.yaml b/test/regression/cases/idle/experiment.yaml
index 8cbad50293983..07fdaa0c45fc3 100644
--- a/test/regression/cases/idle/experiment.yaml
+++ b/test/regression/cases/idle/experiment.yaml
@@ -32,4 +32,4 @@ checks:
description: "Memory usage quality gate. This puts a bound on the total agent memory usage."
bounds:
series: total_rss_bytes
- upper_bound: "424.0 MiB"
+ upper_bound: "465.0 MiB"
diff --git a/test/regression/cases/idle/lading/lading.yaml b/test/regression/cases/idle/lading/lading.yaml
index 0ae7c415523d8..5e2eb2566ef45 100644
--- a/test/regression/cases/idle/lading/lading.yaml
+++ b/test/regression/cases/idle/lading/lading.yaml
@@ -7,5 +7,170 @@ blackhole:
binding_addr: "127.0.0.1:9092"
target_metrics:
- - prometheus:
+ - prometheus: #core agent telemetry
uri: "http://127.0.0.1:5000/telemetry"
+ tags:
+ sub_agent: "core"
+ - prometheus: #process agent telemetry
+ uri: "http://127.0.0.1:6062/telemetry"
+ tags:
+ sub_agent: "process"
+ - expvar: #trace agent telemetry
+ uri: "http://127.0.0.1:5012/debug/vars"
+ vars:
+ - "/Event"
+ - "/ServiceCheck"
+ - "/check_run_v1"
+ - "/cmdline"
+ - "/compressor/BytesIn"
+ - "/compressor/BytesOut"
+ - "/compressor/TotalCompressCycles"
+ - "/compressor/TotalPayloads"
+ - "/connections"
+ - "/container"
+ - "/events_v2"
+ - "/forwarder/APIKeyFailure"
+ - "/forwarder/APIKeyStatus"
+ - "/forwarder/FileStorage/CurrentSizeInBytes"
+ - "/forwarder/FileStorage/DeserializeCount"
+ - "/forwarder/FileStorage/DeserializeErrorsCount"
+ - "/forwarder/FileStorage/DeserializeTransactionsCount"
+ - "/forwarder/FileStorage/FileSize"
+ - "/forwarder/FileStorage/FilesCount"
+ - "/forwarder/FileStorage/FilesRemovedCount"
+ - "/forwarder/FileStorage/PointsDroppedCount"
+ - "/forwarder/FileStorage/SerializeCount"
+ - "/forwarder/FileStorage/StartupReloadedRetryFilesCount"
+ - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount"
+ - "/forwarder/RemovalPolicy/NewRemovalPolicyCount"
+ - "/forwarder/RemovalPolicy/OutdatedFilesCount"
+ - "/forwarder/RemovalPolicy/RegisteredDomainCount"
+ - "/forwarder/TransactionContainer/CurrentMemSizeInBytes"
+ - "/forwarder/TransactionContainer/ErrorsCount"
+ - "/forwarder/TransactionContainer/PointsDroppedCount"
+ - "/forwarder/TransactionContainer/TransactionsCount"
+ - "/forwarder/TransactionContainer/TransactionsDroppedCount"
+ - "/forwarder/Transactions/Cluster"
+ - "/forwarder/Transactions/ClusterRole"
+ - "/forwarder/Transactions/ClusterRoleBinding"
+ - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess"
+ - "/forwarder/Transactions/ConnectionEvents/DNSSuccess"
+ - "/forwarder/Transactions/CronJob"
+ - "/forwarder/Transactions/CustomResource"
+ - "/forwarder/Transactions/CustomResourceDefinition"
+ - "/forwarder/Transactions/DaemonSet"
+ - "/forwarder/Transactions/Deployment"
+ - "/forwarder/Transactions/Dropped"
+ - "/forwarder/Transactions/DroppedByEndpoint"
+ - "/forwarder/Transactions/ECSTask"
+ - "/forwarder/Transactions/Errors"
+ - "/forwarder/Transactions/ErrorsByType/ConnectionErrors"
+ - "/forwarder/Transactions/ErrorsByType/DNSErrors"
+ - "/forwarder/Transactions/ErrorsByType/SentRequestErrors"
+ - "/forwarder/Transactions/ErrorsByType/TLSErrors"
+ - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors"
+ - "/forwarder/Transactions/HTTPErrors"
+ - "/forwarder/Transactions/HTTPErrorsByCode"
+ - "/forwarder/Transactions/HighPriorityQueueFull"
+ - "/forwarder/Transactions/HorizontalPodAutoscaler"
+ - "/forwarder/Transactions/Ingress"
+ - "/forwarder/Transactions/InputBytesByEndpoint"
+ - "/forwarder/Transactions/InputCountByEndpoint"
+ - "/forwarder/Transactions/Job"
+ - "/forwarder/Transactions/LimitRange"
+ - "/forwarder/Transactions/Namespace"
+ - "/forwarder/Transactions/NetworkPolicy"
+ - "/forwarder/Transactions/Node"
+ - "/forwarder/Transactions/OrchestratorManifest"
+ - "/forwarder/Transactions/PersistentVolume"
+ - "/forwarder/Transactions/PersistentVolumeClaim"
+ - "/forwarder/Transactions/Pod"
+ - "/forwarder/Transactions/ReplicaSet"
+ - "/forwarder/Transactions/Requeued"
+ - "/forwarder/Transactions/RequeuedByEndpoint"
+ - "/forwarder/Transactions/Retried"
+ - "/forwarder/Transactions/RetriedByEndpoint"
+ - "/forwarder/Transactions/RetryQueueSize"
+ - "/forwarder/Transactions/Role"
+ - "/forwarder/Transactions/RoleBinding"
+ - "/forwarder/Transactions/Service"
+ - "/forwarder/Transactions/ServiceAccount"
+ - "/forwarder/Transactions/StatefulSet"
+ - "/forwarder/Transactions/StorageClass"
+ - "/forwarder/Transactions/Success"
+ - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1"
+ - "/forwarder/Transactions/SuccessByEndpoint/connections"
+ - "/forwarder/Transactions/SuccessByEndpoint/container"
+ - "/forwarder/Transactions/SuccessByEndpoint/events_v2"
+ - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2"
+ - "/forwarder/Transactions/SuccessByEndpoint/intake"
+ - "/forwarder/Transactions/SuccessByEndpoint/orchestrator"
+ - "/forwarder/Transactions/SuccessByEndpoint/process"
+ - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer"
+ - "/forwarder/Transactions/SuccessByEndpoint/rtprocess"
+ - "/forwarder/Transactions/SuccessByEndpoint/series_v1"
+ - "/forwarder/Transactions/SuccessByEndpoint/series_v2"
+ - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2"
+ - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1"
+ - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2"
+ - "/forwarder/Transactions/SuccessByEndpoint/validate_v1"
+ - "/forwarder/Transactions/SuccessBytesByEndpoint"
+ - "/forwarder/Transactions/VerticalPodAutoscaler"
+ - "/host_metadata_v2"
+ - "/hostname/errors"
+ - "/hostname/provider"
+ - "/intake"
+ - "/jsonstream/CompressorLocks"
+ - "/jsonstream/ItemDrops"
+ - "/jsonstream/PayloadFulls"
+ - "/jsonstream/TotalCalls"
+ - "/jsonstream/TotalItems"
+ - "/jsonstream/TotalLockTime"
+ - "/jsonstream/TotalSerializationTime"
+ - "/jsonstream/WriteItemErrors"
+ - "/kubeletQueries"
+ - "/orchestrator"
+ - "/pid"
+ - "/process"
+ - "/rtcontainer"
+ - "/rtprocess"
+ - "/serializer/SendEventsErrItemTooBigs"
+ - "/serializer/SendEventsErrItemTooBigsFallback"
+ - "/series"
+ - "/series_v1"
+ - "/series_v2"
+ - "/services_checks_v2"
+ - "/sketch_series/ItemTooBig"
+ - "/sketch_series/PayloadFull"
+ - "/sketch_series/UnexpectedItemDrops"
+ - "/sketches_v1"
+ - "/sketches_v2"
+ - "/splitter/NotTooBig"
+ - "/splitter/PayloadDrops"
+ - "/splitter/TooBig"
+ - "/splitter/TotalLoops"
+ - "/stats_writer/Bytes"
+ - "/stats_writer/ClientPayloads"
+ - "/stats_writer/Errors"
+ - "/stats_writer/Payloads"
+ - "/stats_writer/Retries"
+ - "/stats_writer/Splits"
+ - "/stats_writer/StatsBuckets"
+ - "/stats_writer/StatsEntries"
+ - "/trace_writer/Bytes"
+ - "/trace_writer/BytesUncompressed"
+ - "/trace_writer/Errors"
+ - "/trace_writer/Events"
+ - "/trace_writer/Payloads"
+ - "/trace_writer/Retries"
+ - "/trace_writer/SingleMaxSize"
+ - "/trace_writer/Spans"
+ - "/trace_writer/Traces"
+ - "/uptime"
+ - "/validate_v1"
+ - "/version/Version"
+ - "/version/GitCommit"
+ - "/watchdog/CPU/UserAvg"
+ - "/watchdog/Mem/Alloc"
+ tags:
+ sub_agent: "trace"
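
Unlike the two Prometheus targets, the expvar target selects individual variables by slash-separated paths into the trace-agent's `/debug/vars` JSON. To eyeball a few of the values listed above outside of lading — a sketch, assuming the slash paths map onto nested JSON keys and the debug server is up on the port from this config:

```sh
curl -s http://127.0.0.1:5012/debug/vars \
  | jq '{spans: .trace_writer.Spans, version: .version.Version, uptime: .uptime}'
```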
diff --git a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml
index 66b8ab0b4da1a..755c07c819559 100644
--- a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml
+++ b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml
@@ -9,6 +9,7 @@ dd_url: http://127.0.0.1:9092
process_config.process_dd_url: http://localhost:9093
telemetry.enabled: true
+telemetry.checks: '*'
apm_config:
enabled: true
@@ -35,4 +36,4 @@ otlp_config:
traces:
enabled: true
debug:
- loglevel: info
+ verbosity: normal
diff --git a/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml b/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml
index 661f27dda68dc..3847663675241 100644
--- a/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml
+++ b/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml
@@ -6,6 +6,7 @@ auth_token_file_path: /tmp/agent-auth-token
cloud_provider_metadata: []
telemetry.enabled: true
+telemetry.checks: '*'
memtrack_enabled: false
diff --git a/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml b/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml
index 01c444918dbe5..e6d51841b1586 100644
--- a/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml
+++ b/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml
@@ -4,6 +4,7 @@ dd_url: http://localhost:9092
process_config.process_dd_url: http://localhost:9093
telemetry.enabled: true
+telemetry.checks: '*'
# Disable cloud detection. This stops the Agent from poking around the
# execution environment & network. This is particularly important if the target
diff --git a/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml b/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml
index 6320ce2701c40..96e2d4f537c0e 100644
--- a/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml
+++ b/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml
@@ -4,6 +4,7 @@ dd_url: http://localhost:9091
process_config.process_dd_url: http://localhost:9093
telemetry.enabled: true
+telemetry.checks: '*'
# Disable cloud detection. This stops the Agent from poking around the
# execution environment & network. This is particularly important if the target
diff --git a/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml b/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml
index cadf5be194ac6..5eb8f41ad2588 100644
--- a/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml
+++ b/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml
@@ -5,6 +5,7 @@ dd_url: http://127.0.0.1:9091
process_config.process_dd_url: http://localhost:9092
telemetry.enabled: true
+telemetry.checks: '*'
# Disable cloud detection. This stops the Agent from poking around the
# execution environment & network. This is particularly important if the target
diff --git a/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml b/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml
index 28f7330c6d81d..a9f1cb85f20b1 100644
--- a/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml
+++ b/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml
@@ -3,6 +3,9 @@ auth_token_file_path: /tmp/agent-auth-token
dd_url: http://127.0.0.1:9091
process_config.process_dd_url: http://localhost:9092
+telemetry.enabled: true
+telemetry.checks: '*'
+
# Disable cloud detection. This stops the Agent from poking around the
# execution environment & network. This is particularly important if the target
# has network access.
diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1
index c0d4194bafa5a..e85da22733afa 100644
--- a/tools/ci/docker-login.ps1
+++ b/tools/ci/docker-login.ps1
@@ -6,8 +6,18 @@ If ($lastExitCode -ne "0") {
throw "Previous command returned $lastExitCode"
}
# DockerHub login
-$DOCKER_REGISTRY_LOGIN = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN_SSM_KEY")
-$DOCKER_REGISTRY_PWD = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD_SSM_KEY")
+$tmpfile = [System.IO.Path]::GetTempFileName()
+& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_LOGIN" "$tmpfile"
+If ($lastExitCode -ne "0") {
+ throw "Previous command returned $lastExitCode"
+}
+$DOCKER_REGISTRY_LOGIN = Get-Content "$tmpfile"
+& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_PWD" "$tmpfile"
+If ($lastExitCode -ne "0") {
+ throw "Previous command returned $lastExitCode"
+}
+$DOCKER_REGISTRY_PWD = Get-Content "$tmpfile"
+Remove-Item "$tmpfile"
docker login --username "${DOCKER_REGISTRY_LOGIN}" --password "${DOCKER_REGISTRY_PWD}" "docker.io"
If ($lastExitCode -ne "0") {
throw "Previous command returned $lastExitCode"
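
The PowerShell side round-trips each secret through a temp file, while the shell scripts in this patch keep the simpler stdout contract. For comparison, a bash analogue of this login flow — a sketch, not part of the patch; `--password-stdin` keeps the secret off the command line:

```sh
login="$(tools/ci/fetch_secret.sh "$DOCKER_REGISTRY_LOGIN")" || exit 1
password="$(tools/ci/fetch_secret.sh "$DOCKER_REGISTRY_PWD")" || exit 1
echo "$password" | docker login --username "$login" --password-stdin docker.io
```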
diff --git a/tools/ci/aws_ssm_get_wrapper.ps1 b/tools/ci/fetch_secret.ps1
similarity index 73%
rename from tools/ci/aws_ssm_get_wrapper.ps1
rename to tools/ci/fetch_secret.ps1
index 158301f4a08c6..0e345e80ff32a 100644
--- a/tools/ci/aws_ssm_get_wrapper.ps1
+++ b/tools/ci/fetch_secret.ps1
@@ -1,5 +1,6 @@
param (
- [string]$parameterName
+ [string]$parameterName,
+ [string]$tempFile
)
$retryCount = 0
@@ -9,15 +10,18 @@ while ($retryCount -lt $maxRetries) {
$result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile.txt)
$error = Get-Content awsErrorFile.txt
if ($result) {
- $result
- break
+ "$result" | Out-File -FilePath "$tempFile" -Encoding ASCII
+ exit 0
}
if ($error -match "Unable to locate credentials") {
# See 5th row in https://docs.google.com/spreadsheets/d/1JvdN0N-RdNEeOJKmW_ByjBsr726E3ZocCKU8QoYchAc
Write-Error "Permanent error: unable to locate AWS credentials, not retrying"
- exit 1
+ exit 42
}
$retryCount++
Start-Sleep -Seconds ([math]::Pow(2, $retryCount))
}
+
+Write-Error "Failed to retrieve $parameterName after $maxRetries retries"
+exit 1
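
The retry loop backs off exponentially (2, 4, 8, … seconds) and reserves exit code 42 for the one error retrying cannot fix. The same schedule in shell form — a sketch mirroring the PowerShell logic above, not the repo's actual fetch_secret.sh, and printing to stdout rather than a temp file:

```sh
retry=0
max=10
while [[ $retry -lt $max ]]; do
  if value="$(aws ssm get-parameter --region us-east-1 --name "$1" \
      --with-decryption --query "Parameter.Value" --output text 2>aws_err.txt)"; then
    printf '%s' "$value"
    exit 0
  fi
  # Exit code 42 flags the permanent failure callers should not retry.
  grep -q "Unable to locate credentials" aws_err.txt && exit 42
  retry=$((retry + 1))
  sleep $((2 ** retry))
done
echo "Failed to retrieve $1 after $max retries" >&2
exit 1
```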
diff --git a/tools/ci/aws_ssm_get_wrapper.sh b/tools/ci/fetch_secret.sh
similarity index 92%
rename from tools/ci/aws_ssm_get_wrapper.sh
rename to tools/ci/fetch_secret.sh
index 2fb8298145d9b..d7b8406a7c458 100755
--- a/tools/ci/aws_ssm_get_wrapper.sh
+++ b/tools/ci/fetch_secret.sh
@@ -4,9 +4,6 @@ retry_count=0
max_retries=10
parameter_name="$1"
-# shellcheck disable=SC1091
-source /root/.bashrc > /dev/null 2>&1
-
set +x
while [[ $retry_count -lt $max_retries ]]; do
diff --git a/tools/ci/junit_upload.sh b/tools/ci/junit_upload.sh
index 8d13895f0e912..e4ab90ee9b70a 100755
--- a/tools/ci/junit_upload.sh
+++ b/tools/ci/junit_upload.sh
@@ -1,14 +1,12 @@
 #!/bin/bash
-# shellcheck source=/dev/null
-source /root/.bashrc
# junit file name can differ in kitchen or macos context
junit_files="junit-*.tgz"
if [[ -n "$1" ]]; then
junit_files="$1"
fi
-GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$GITLAB_READ_API_TOKEN_SSM_NAME")"
-DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$API_KEY_ORG2_SSM_NAME")"
+GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$GITLAB_READ_API_TOKEN")"
+DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$API_KEY_ORG2")"
export DATADOG_API_KEY
export GITLAB_TOKEN
error=0
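
Since fetch_secret.ps1 now distinguishes the permanent credential failure (exit 42) from exhausted retries (exit 1), callers can branch on the status instead of blindly retrying. A sketch, assuming the shell variant adopts the same exit-code convention:

```sh
GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$GITLAB_READ_API_TOKEN")"
rc=$?
if [[ $rc -ne 0 ]]; then
  [[ $rc -eq 42 ]] && echo "Permanent error fetching token; not retrying" >&2
  exit "$rc"
fi
```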