diff --git a/.copyright-overrides.yml b/.copyright-overrides.yml index 8861edfc8782e..1e31d179ece4c 100644 --- a/.copyright-overrides.yml +++ b/.copyright-overrides.yml @@ -216,6 +216,7 @@ github.com/gogo/protobuf: ["Copyright (c) 2013, The GoGo Authors. All rights res github.com/moby/sys/mountinfo: Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. github.com/moby/sys/signal: Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. github.com/moby/sys/user: Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. +github.com/moby/sys/userns: Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. github.com/modern-go/concurrent: Copyright (c) 2018 Tao Wen github.com/modern-go/reflect2: Copyright (c) 2018 Tao Wen github.com/opencontainers/selinux/*: Copyright (c) 2017 The Authors diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index 9503651db52be..0d190cb7bb606 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -77,6 +77,7 @@ jobs: ATLASSIAN_USERNAME: ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD: ${{ secrets.ATLASSIAN_PASSWORD }} run: | + export SLACK_API_TOKEN="${{ secrets.SLACK_API_TOKEN }}" echo "CHANGES=$(inv -e release.check-for-changes -r ${{ matrix.value }} ${{ needs.find_release_branches.outputs.warning }})" >> $GITHUB_OUTPUT - name: Create RC PR diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 76b0f7ab0af50..c50880e298007 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -739,9 +739,6 @@ workflow: - pkg/network/**/* - pkg/process/monitor/* - pkg/util/kernel/**/* - - test/kitchen/site-cookbooks/dd-system-probe-check/**/* - - test/kitchen/test/integration/win-sysprobe-test/**/* - - .gitlab/functional_test/system_probe_windows.yml - .gitlab/kernel_matrix_testing/system_probe.yml - .gitlab/kernel_matrix_testing/common.yml - test/new-e2e/system-probe/**/* @@ -1030,6 +1027,28 @@ workflow: - when: manual 
allow_failure: true +.on_windows_systemprobe_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - pkg/collector/corechecks/servicediscovery/module/* + - pkg/network/**/* + - pkg/process/monitor/* + - pkg/util/kernel/**/* + - test/new-e2e/tests/sysprobe-functional/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + +.on_windows_security_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - pkg/security/**/* + - pkg/eventmonitor/**/* + - test/new-e2e/tests/security-agent-functional/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + .on_scheduled_main: - <<: *if_scheduled_main diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index a1d14879d7055..257e8d255a35b 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -368,6 +368,32 @@ new-e2e-ndm-snmp: TARGETS: ./tests/ndm/snmp TEAM: network-device-monitoring +new-e2e-windows-systemprobe: + extends: .new_e2e_template + rules: + - !reference [.on_windows_systemprobe_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] + - deploy_windows_testing-a7 + - tests_windows_sysprobe_x64 + variables: + TARGETS: ./tests/sysprobe-functional + TEAM: windows-kernel-integrations + +new-e2e-windows-security-agent: + extends: .new_e2e_template + rules: + - !reference [.on_windows_security_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] + - deploy_windows_testing-a7 + - tests_windows_secagent_x64 + variables: + TARGETS: ./tests/security-agent-functional + TEAM: windows-kernel-integrations + new-e2e-otel: extends: .new_e2e_template rules: diff --git a/.gitlab/functional_test/include.yml b/.gitlab/functional_test/include.yml index b5a2e851ac2c6..23fa37d22fdc7 100644 --- 
a/.gitlab/functional_test/include.yml +++ b/.gitlab/functional_test/include.yml @@ -7,7 +7,6 @@ include: - .gitlab/functional_test/security_agent.yml - .gitlab/functional_test/serverless.yml - .gitlab/functional_test/regression_detector.yml - - .gitlab/functional_test/system_probe_windows.yml - .gitlab/kernel_matrix_testing/common.yml - .gitlab/kernel_matrix_testing/system_probe.yml - .gitlab/kernel_matrix_testing/security_agent.yml diff --git a/.gitlab/functional_test/security_agent.yml b/.gitlab/functional_test/security_agent.yml index 4bd5af8b4c7d9..1842a51081f2d 100644 --- a/.gitlab/functional_test/security_agent.yml +++ b/.gitlab/functional_test/security_agent.yml @@ -157,25 +157,3 @@ kitchen_stress_security_agent: matrix: - KITCHEN_PLATFORM: "ubuntu" KITCHEN_OSVERS: "ubuntu-20-04" - -kitchen_test_security_agent_windows_x64: - extends: - - .kitchen_test_security_agent - - .kitchen_os_windows - - .kitchen_azure_location_north_central_us - rules: - !reference [.on_security_agent_changes_or_manual] - stage: functional_test - needs: [ "tests_windows_secagent_x64" ] - variables: - KITCHEN_ARCH: x86_64 - KITCHEN_OSVERS: "win2016" - CHEF_VERSION: 14.12.9 # newer versions error out during kitchen setup of azure VM - before_script: - - export WINDOWS_DDPROCMON_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_DRIVER") - - export WINDOWS_DDPROCMON_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_VERSION") - - export WINDOWS_DDPROCMON_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_SHASUM") - - pushd $DD_AGENT_TESTING_DIR - - tasks/kitchen_setup.sh - script: - - tasks/run-test-kitchen.sh windows-secagent-test $AGENT_MAJOR_VERSION diff --git a/.gitlab/functional_test/system_probe_windows.yml b/.gitlab/functional_test/system_probe_windows.yml deleted file mode 100644 index a26fa66f27f3b..0000000000000 --- a/.gitlab/functional_test/system_probe_windows.yml +++ /dev/null @@ 
-1,27 +0,0 @@ ---- -# FIXME: our current Gitlab version doesn't support importing a file more than once -# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file -# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987 -# include: -# - .gitlab/kitchen_testing/testing.yml -# - .gitlab/functional_test/common.yml - -kitchen_test_system_probe_windows_x64: - extends: - - .kitchen_test_system_probe - - .kitchen_os_windows - - .kitchen_azure_location_north_central_us - stage: functional_test - needs: [ "tests_windows_sysprobe_x64" ] - variables: - KITCHEN_ARCH: x86_64 - KITCHEN_OSVERS: "win2016" - CHEF_VERSION: 14.12.9 # newer versions error out during kitchen setup of azure VM - before_script: - - export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_DRIVER") - - export WINDOWS_DDNPM_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_VERSION") - - export WINDOWS_DDNPM_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_SHASUM") - - pushd $DD_AGENT_TESTING_DIR - - tasks/kitchen_setup.sh - script: - - tasks/run-test-kitchen.sh windows-sysprobe-test $AGENT_MAJOR_VERSION diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 36cdcff390115..c4d57130833a5 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -282,3 +282,4 @@ new-e2e-unit-tests: KUBERNETES_MEMORY_REQUEST: 12Gi KUBERNETES_MEMORY_LIMIT: 16Gi KUBERNETES_CPU_REQUEST: 6 + timeout: 10m diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 6c4acb405c730..7a45c65b38675 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,32 @@ Release Notes ============= +.. _Release Notes_7.56.2: + +7.56.2 +====== + +.. _Release Notes_7.56.2_Prelude: + +Prelude +------- + +Released on: 2024-09-02 +Pinned to datadog-agent v7.56.2: `CHANGELOG `_. + +.. _Release Notes_7.56.1: + +7.56.1 +====== + +.. 
_Release Notes_7.56.1_Prelude: + +Prelude +------- + +Released on: 2024-08-29 +Pinned to datadog-agent v7.56.1: `CHANGELOG `_. + .. _Release Notes_7.56.0: 7.56.0 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 075bf271c7b79..10ea12b5fa3ac 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,52 @@ Release Notes ============= +.. _Release Notes_7.56.2: + +7.56.2 +====== + +.. _Release Notes_7.56.2_Prelude: + +Prelude +------- + +Release on: 2024-09-02 + +- Please refer to the `7.56.2 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.56.2_Bug Fixes: + +Bug Fixes +--------- + +- Fix issue causing GUI to fail when opening with Internet Explorer on Windows. + + +.. _Release Notes_7.56.1: + +7.56.1 +====== + +.. _Release Notes_7.56.1_Prelude: + +Prelude +------- + +Release on: 2024-08-29 + +- Please refer to the `7.56.1 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.56.1_Bug Fixes: + +Bug Fixes +--------- + +- Fixed a nil pointer dereference issue in the `Tailer.DidRotate` function that was causing the Agent to panic. + + .. _Release Notes_7.56.0: 7.56.0 diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 607e1ba6da4cd..79e35081214b1 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -792,7 +792,6 @@ core,github.com/containerd/containerd/pkg/transfer,Apache-2.0,"Copyright 2012-20 core,github.com/containerd/containerd/pkg/transfer/proxy,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/pkg/transfer/streaming,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/pkg/unpack,Apache-2.0,"Copyright 2012-2015 Docker, Inc." -core,github.com/containerd/containerd/pkg/userns,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/platforms,Apache-2.0,"Copyright 2012-2015 Docker, Inc." 
core,github.com/containerd/containerd/plugin,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/protobuf,Apache-2.0,"Copyright 2012-2015 Docker, Inc." @@ -1498,6 +1497,7 @@ core,github.com/moby/sys/mountinfo,Apache-2.0,Copyright (c) 2014-2018 The Docker core,github.com/moby/sys/sequential,Apache-2.0,Kir Kolyshkin |Sebastiaan van Stijn |Sebastiaan van Stijn |Tibor Vass |Brian Goff |John Howard |Victor Vieux |Michael Crosby |Daniel Nephin |Tianon Gravi |Vincent Batts |Akihiro Suda |Michael Crosby |Yong Tang |Kir Kolyshkin |Christopher Jones |Guillaume J. Charmes |Kato Kazuyoshi |Manu Gupta |Michael Crosby |Vincent Demeester |Aleksa Sarai |Amit Krishnan |Arnaud Porterie |Brian Goff |Brian Goff |Dan Walsh |Michael Crosby |Phil Estes |Shengjing Zhu |Solomon Hykes |Tobias Klauser |lalyos |unclejack |Akihiro Suda |Alexander Morozov |Jessica Frazelle |Jessica Frazelle |Jessie Frazelle |Justas Brazauskas |Justin Cormack |Kazuyoshi Kato |Naveed Jamil |Vincent Demeester |shuai-z |Ahmet Alp Balkan |Aleksa Sarai |Alexander Larsson |Alexander Morozov |Alexandr Morozov |Alexandr Morozov |Antonio Murdaca |Antonio Murdaca |Antonio Murdaca |Artem Khramov |Cezar Sa Espinola |Chen Hanxiao |Darren Stahl |David Calavera |Derek McGowan |Eng Zer Jun |Erik Dubbelboer |Fabian Kramm |Guillaume Dufour |Guillaume J. Charmes |Hajime Tazaki |Jamie Hannaford |Jason A. Donenfeld |Jhon Honce |Josh Soref |Kasper Fabæch Brandt |Kathryn Baldauf |Kenfe-Mickael Laventure |Kirill Kolyshkin |Muhammad Kaisar Arkhan |Oli |Olli Janatuinen |Paul Nasrat |Peter Bourgon |Peter Waller |Phil Estes |Samuel Karp |Stefan J. Wernli |Steven Hartland |Stig Larsson |Tim Wang |Victor Vieux |Victor Vieux |Yan Feng |jhowardmsft |liuxiaodong |phineas |unclejack |yuexiao-wang |谢致邦 (XIE Zhibang) core,github.com/moby/sys/signal,Apache-2.0,Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. core,github.com/moby/sys/user,Apache-2.0,Copyright (c) 2014-2018 The Docker & Go Authors. 
All rights reserved. +core,github.com/moby/sys/userns,Apache-2.0,Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. core,github.com/modern-go/concurrent,Apache-2.0,Copyright (c) 2018 Tao Wen core,github.com/modern-go/reflect2,Apache-2.0,Copyright (c) 2018 Tao Wen core,github.com/mohae/deepcopy,MIT,Copyright (c) 2014 Joel @@ -1896,6 +1896,11 @@ core,github.com/pjbgf/sha1cd/ubc,Apache-2.0,Copyright 2023 pjbgf core,github.com/pkg/browser,BSD-2-Clause,"Copyright (c) 2014, Dave Cheney " core,github.com/pkg/errors,BSD-2-Clause,"Copyright (c) 2015, Dave Cheney " core,github.com/planetscale/vtprotobuf/protohelpers,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" +core,github.com/planetscale/vtprotobuf/types/known/anypb,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" +core,github.com/planetscale/vtprotobuf/types/known/durationpb,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" +core,github.com/planetscale/vtprotobuf/types/known/emptypb,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" +core,github.com/planetscale/vtprotobuf/types/known/structpb,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" +core,github.com/planetscale/vtprotobuf/types/known/wrapperspb,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved | Copyright (c) 2018 The Go Authors. 
All rights reserved | Copyright (c) 2021, PlanetScale Inc. All rights reserved" core,github.com/pmezard/go-difflib/difflib,BSD-3-Clause,"Copyright (c) 2013, Patrick Mezard" core,github.com/power-devops/perfstat,MIT,Copyright (c) 2020 Power DevOps core,github.com/prometheus-community/pro-bing,MIT,Copyright 2016 Cameron Sparr and contributors | Copyright 2022 The Prometheus Authors @@ -2796,7 +2801,9 @@ core,google.golang.org/grpc/encoding/gzip,Apache-2.0,Copyright 2014 gRPC authors core,google.golang.org/grpc/encoding/proto,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/examples/helloworld/helloworld,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/examples/route_guide/routeguide,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/experimental/stats,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/grpclog,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/grpclog/internal,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/health/grpc_health_v1,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/backoff,Apache-2.0,Copyright 2014 gRPC authors. @@ -2820,12 +2827,14 @@ core,google.golang.org/grpc/internal/resolver/dns/internal,Apache-2.0,Copyright core,google.golang.org/grpc/internal/resolver/passthrough,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/resolver/unix,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/serviceconfig,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/internal/stats,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/status,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/syscall,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/transport,Apache-2.0,Copyright 2014 gRPC authors. 
core,google.golang.org/grpc/internal/transport/networktype,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/internal/xds,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/keepalive,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/mem,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/metadata,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/peer,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/resolver,Apache-2.0,Copyright 2014 gRPC authors. diff --git a/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default new file mode 100644 index 0000000000000..00d9a2dbba2c8 --- /dev/null +++ b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default @@ -0,0 +1,2 @@ +instances: + - {} diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 37a197d47fcc9..b26ba80c58a9a 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -338,16 +338,8 @@ func getSharedFxOption() fx.Option { )), core.Bundle(), lsof.Module(), - fx.Supply(dogstatsdServer.Params{ - Serverless: false, - }), - forwarder.BundleWithProvider(func(config config.Component, log log.Component) defaultforwarder.Params { - params := defaultforwarder.NewParams(config, log) - // Enable core agent specific features like persistence-to-disk - params.Options.EnabledFeatures = defaultforwarder.SetFeature(params.Options.EnabledFeatures, defaultforwarder.CoreFeatures) - return params - }), - + // Enable core agent specific features like persistence-to-disk + forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithFeatures(defaultforwarder.CoreFeatures))), // workloadmeta setup wmcatalog.GetCatalog(), workloadmetafx.Module(defaults.DefaultParams()), @@ -386,7 +378,7 @@ func getSharedFxOption() fx.Option { compressionimpl.Module(), demultiplexerimpl.Module(), 
demultiplexerendpointfx.Module(), - dogstatsd.Bundle(), + dogstatsd.Bundle(dogstatsdServer.Params{Serverless: false}), fx.Provide(func(logsagent optional.Option[logsAgent.Component]) optional.Option[logsagentpipeline.Component] { if la, ok := logsagent.Get(); ok { return optional.NewOption[logsagentpipeline.Component](la) diff --git a/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go b/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go index a710a4b259306..547bf4d4940b2 100644 --- a/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go +++ b/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go @@ -41,11 +41,8 @@ func TestDogstatsdMetricsStats(t *testing.T) { deps := fxutil.Test[testDeps](t, fx.Options( core.MockBundle(), fx.Supply(core.BundleParams{}), - fx.Supply(server.Params{ - Serverless: false, - }), demultiplexerimpl.MockModule(), - dogstatsd.Bundle(), + dogstatsd.Bundle(server.Params{Serverless: false}), defaultforwarder.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), )) diff --git a/cmd/agent/subcommands/snmp/command.go b/cmd/agent/subcommands/snmp/command.go index 53848d363e43a..b19402aec82f0 100644 --- a/cmd/agent/subcommands/snmp/command.go +++ b/cmd/agent/subcommands/snmp/command.go @@ -190,7 +190,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { LogParams: log.ForOneShot(command.LoggerName, "off", true)}), core.Bundle(), aggregator.Bundle(), - forwarder.BundleWithProvider(defaultforwarder.NewParams), + forwarder.Bundle(defaultforwarder.NewParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), orchestratorimpl.Module(), diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index 9b8494e8781f7..b78a1c9223f7f 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ 
b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -83,7 +83,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { LogParams: log.ForDaemon(command.LoggerName, "log_file", path.DefaultDCALogFile), }), core.Bundle(), - forwarder.BundleWithProvider(defaultforwarder.NewParamsWithResolvers), + forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers())), compressionimpl.Module(), demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index 8d35057f9aa31..abc7704e601bc 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -134,11 +134,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { LogParams: log.ForDaemon(command.LoggerName, "log_file", path.DefaultDCALogFile), }), core.Bundle(), - forwarder.BundleWithProvider(func(config config.Component, log log.Component) defaultforwarder.Params { - params := defaultforwarder.NewParamsWithResolvers(config, log) - params.Options.DisableAPIKeyChecking = true - return params - }), + forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers(), defaultforwarder.WithDisableAPIKeyChecking())), compressionimpl.Module(), demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index 42898c1f47236..103c2314d10d2 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -132,11 +132,8 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil fx.Supply(log.ForDaemon(string(loggerName), "log_file", params.DefaultLogFile)), config.Module(), logfx.Module(), - fx.Supply(dogstatsdServer.Params{ - Serverless: false, - }), - dogstatsd.Bundle(), - forwarder.BundleWithProvider(defaultforwarder.NewParams), 
+ dogstatsd.Bundle(dogstatsdServer.Params{Serverless: false}), + forwarder.Bundle(defaultforwarder.NewParams()), // workloadmeta setup wmcatalog.GetCatalog(), workloadmetafx.ModuleWithProvider(func(config config.Component) workloadmeta.Params { diff --git a/cmd/installer/subcommands/daemon/status.tmpl b/cmd/installer/subcommands/daemon/status.tmpl index ba1d000c17bce..714ed431f3214 100644 --- a/cmd/installer/subcommands/daemon/status.tmpl +++ b/cmd/installer/subcommands/daemon/status.tmpl @@ -14,7 +14,9 @@ Datadog Installer v{{ htmlSafe .Version }} {{- end }} {{- if eq $name "datadog-apm-inject" }}{{ template "datadog-apm-inject" $.ApmInjectionStatus }}{{ end }} {{ end -}} - +{{- if .RemoteConfigState }} +{{ template "remote-config-state" $.RemoteConfigState }} +{{- end -}} {{- define "datadog-apm-inject" }} Instrumentation status: @@ -31,3 +33,25 @@ Datadog Installer v{{ htmlSafe .Version }} {{ redText "●" }} Docker: Not instrumented {{- end }} {{- end -}} + +{{- define "remote-config-state" }} + Remote configuration client state: + {{ range . }} + {{ boldText .Package }} + StableVersion: {{ .StableVersion }} + ExperimentVersion: {{ .ExperimentVersion }} + StableConfigVersion: {{ .StableConfigVersion }} + ExperimentConfigVersion: {{ .ExperimentConfigVersion }} + RemoteConfigVersion: {{ .RemoteConfigVersion }} + Task: + {{- if .Task }} + Id: {{ .Task.Id }} + State: {{ .Task.State }} + {{- if .Task.Error }} + Error: {{ .Task.Error }} + {{- end }} + {{- else }} + No task available + {{- end }} + {{ end }} +{{- end }} diff --git a/cmd/installer/user/user_darwin.go b/cmd/installer/user/user_darwin.go new file mode 100644 index 0000000000000..8701d984ce683 --- /dev/null +++ b/cmd/installer/user/user_darwin.go @@ -0,0 +1,26 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +//go:build darwin + +// Package user provides helpers to change the user of the process. +package user + +import "syscall" + +// IsRoot always returns true on darwin. +func IsRoot() bool { + return syscall.Getuid() == 0 +} + +// RootToDatadogAgent is a noop on darwin. +func RootToDatadogAgent() error { + return nil +} + +// DatadogAgentToRoot is a noop on darwin. +func DatadogAgentToRoot() error { + return nil +} diff --git a/cmd/installer/user/user.go b/cmd/installer/user/user_nix.go similarity index 98% rename from cmd/installer/user/user.go rename to cmd/installer/user/user_nix.go index 02c23d6ddb244..bf10f18a1248a 100644 --- a/cmd/installer/user/user.go +++ b/cmd/installer/user/user_nix.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build !windows +//go:build !windows && !darwin // Package user provides helpers to change the user of the process. 
package user diff --git a/cmd/otel-agent/config/agent_config.go b/cmd/otel-agent/config/agent_config.go index 21098e497d5ab..98695334c1b3f 100644 --- a/cmd/otel-agent/config/agent_config.go +++ b/cmd/otel-agent/config/agent_config.go @@ -25,6 +25,40 @@ import ( "go.opentelemetry.io/collector/service" ) +type logLevel int + +const ( + trace logLevel = iota - 1 + debug + info + warn + err + critical + off +) + +// datadog agent log levels: trace, debug, info, warn, error, critical, and off +// otel log levels: disabled, debug, info, warn, error +var logLevelMap = map[string]logLevel{ + "off": off, + "disabled": off, + "trace": trace, + "debug": debug, + "info": info, + "warn": warn, + "error": err, + "critical": critical, +} + +var logLevelReverseMap = func(src map[string]logLevel) map[logLevel]string { + reverse := map[logLevel]string{} + for k, v := range src { + reverse[v] = k + } + + return reverse +}(logLevelMap) + // NewConfigComponent creates a new config component from the given URIs func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (config.Component, error) { // Load the configuration from the fileName @@ -60,6 +94,13 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi apiKey := string(ddc.API.Key) // Set the global agent config pkgconfig := pkgconfigsetup.Datadog() + + pkgconfig.SetConfigName("OTel") + pkgconfig.SetEnvPrefix("DD") + pkgconfig.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + pkgconfig.BindEnvAndSetDefault("log_level", "info") + + activeLogLevel := critical if len(ddCfg) != 0 { // if the configuration file path was supplied via CLI flags or env vars, // add that first so it's first in line @@ -73,49 +114,63 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi if err != nil { return nil, err } + var ok bool + activeLogLevel, ok = logLevelMap[pkgconfig.GetString("log_level")] + if !ok { + return nil, fmt.Errorf("invalid log level (%v) set in the Datadog 
Agent configuration", pkgconfig.GetString("log_level")) + } } - pkgconfig.SetConfigName("OTel") - pkgconfig.SetEnvPrefix("DD") - pkgconfig.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + // Set the right log level. The most verbose setting takes precedence. + telemetryLogLevel := sc.Telemetry.Logs.Level + telemetryLogMapping, ok := logLevelMap[telemetryLogLevel.String()] + if !ok { + return nil, fmt.Errorf("invalid log level (%v) set in the OTel Telemetry configuration", telemetryLogLevel.String()) + } + if telemetryLogMapping < activeLogLevel { + activeLogLevel = telemetryLogMapping + } // Override config read (if any) with Default values pkgconfigsetup.InitConfig(pkgconfig) - pkgconfig.Set("api_key", apiKey, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("site", site, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfigmodel.ApplyOverrideFuncs(pkgconfig) + + pkgconfig.Set("log_level", logLevelReverseMap[activeLogLevel], pkgconfigmodel.SourceFile) + + pkgconfig.Set("api_key", apiKey, pkgconfigmodel.SourceFile) + pkgconfig.Set("site", site, pkgconfigmodel.SourceFile) - pkgconfig.Set("dd_url", ddc.Metrics.Endpoint, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("dd_url", ddc.Metrics.Endpoint, pkgconfigmodel.SourceFile) // Log configs - pkgconfig.Set("logs_enabled", true, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("logs_config.force_use_http", true, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("logs_config.logs_dd_url", ddc.Logs.Endpoint, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("logs_config.batch_wait", ddc.Logs.BatchWait, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("logs_config.use_compression", ddc.Logs.UseCompression, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("logs_config.compression_level", ddc.Logs.CompressionLevel, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("log_level", sc.Telemetry.Logs.Level, pkgconfigmodel.SourceLocalConfigProcess) + 
pkgconfig.Set("logs_enabled", true, pkgconfigmodel.SourceDefault) + pkgconfig.Set("logs_config.force_use_http", true, pkgconfigmodel.SourceDefault) + pkgconfig.Set("logs_config.logs_dd_url", ddc.Logs.Endpoint, pkgconfigmodel.SourceFile) + pkgconfig.Set("logs_config.batch_wait", ddc.Logs.BatchWait, pkgconfigmodel.SourceFile) + pkgconfig.Set("logs_config.use_compression", ddc.Logs.UseCompression, pkgconfigmodel.SourceFile) + pkgconfig.Set("logs_config.compression_level", ddc.Logs.CompressionLevel, pkgconfigmodel.SourceFile) // APM & OTel trace configs - pkgconfig.Set("apm_config.enabled", true, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("apm_config.apm_non_local_traffic", true, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("apm_config.enabled", true, pkgconfigmodel.SourceDefault) + pkgconfig.Set("apm_config.apm_non_local_traffic", true, pkgconfigmodel.SourceDefault) - pkgconfig.Set("apm_config.debug.port", 0, pkgconfigmodel.SourceLocalConfigProcess) // Disabled in the otel-agent - pkgconfig.Set(pkgconfigsetup.OTLPTracePort, 0, pkgconfigmodel.SourceLocalConfigProcess) // Disabled in the otel-agent + pkgconfig.Set("apm_config.debug.port", 0, pkgconfigmodel.SourceDefault) // Disabled in the otel-agent + pkgconfig.Set(pkgconfigsetup.OTLPTracePort, 0, pkgconfigmodel.SourceDefault) // Disabled in the otel-agent - pkgconfig.Set("otlp_config.traces.span_name_as_resource_name", ddc.Traces.SpanNameAsResourceName, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("otlp_config.traces.span_name_remappings", ddc.Traces.SpanNameRemappings, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("otlp_config.traces.span_name_as_resource_name", ddc.Traces.SpanNameAsResourceName, pkgconfigmodel.SourceFile) + pkgconfig.Set("otlp_config.traces.span_name_remappings", ddc.Traces.SpanNameRemappings, pkgconfigmodel.SourceFile) - pkgconfig.Set("apm_config.receiver_enabled", false, pkgconfigmodel.SourceLocalConfigProcess) // disable HTTP receiver - 
pkgconfig.Set("apm_config.ignore_resources", ddc.Traces.IgnoreResources, pkgconfigmodel.SourceLocalConfigProcess) - pkgconfig.Set("apm_config.skip_ssl_validation", ddc.ClientConfig.TLSSetting.InsecureSkipVerify, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("apm_config.receiver_enabled", false, pkgconfigmodel.SourceDefault) // disable HTTP receiver + pkgconfig.Set("apm_config.ignore_resources", ddc.Traces.IgnoreResources, pkgconfigmodel.SourceFile) + pkgconfig.Set("apm_config.skip_ssl_validation", ddc.ClientConfig.TLSSetting.InsecureSkipVerify, pkgconfigmodel.SourceFile) if v := ddc.Traces.TraceBuffer; v > 0 { - pkgconfig.Set("apm_config.trace_buffer", v, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("apm_config.trace_buffer", v, pkgconfigmodel.SourceFile) } if addr := ddc.Traces.Endpoint; addr != "" { - pkgconfig.Set("apm_config.apm_dd_url", addr, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("apm_config.apm_dd_url", addr, pkgconfigmodel.SourceFile) } if ddc.Traces.ComputeTopLevelBySpanKind { - pkgconfig.Set("apm_config.features", []string{"enable_otlp_compute_top_level_by_span_kind"}, pkgconfigmodel.SourceLocalConfigProcess) + pkgconfig.Set("apm_config.features", []string{"enable_otlp_compute_top_level_by_span_kind"}, pkgconfigmodel.SourceFile) } return pkgconfig, nil diff --git a/cmd/otel-agent/config/agent_config_test.go b/cmd/otel-agent/config/agent_config_test.go index 8e7c54726850d..c985c24859da2 100644 --- a/cmd/otel-agent/config/agent_config_test.go +++ b/cmd/otel-agent/config/agent_config_test.go @@ -7,12 +7,28 @@ package config import ( "context" + "os" + "strings" "testing" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func TestAgentConfig(t *testing.T) { +type ConfigTestSuite struct { + suite.Suite +} + +func (suite *ConfigTestSuite) SetupTest() { + 
datadog := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + pkgconfigsetup.SetDatadog(datadog) +} + +func (suite *ConfigTestSuite) TestAgentConfig() { + t := suite.T() fileName := "testdata/config.yaml" c, err := NewConfigComponent(context.Background(), "", []string{fileName}) if err != nil { @@ -37,7 +53,8 @@ func TestAgentConfig(t *testing.T) { assert.Equal(t, nil, c.Get("apm_config.features")) } -func TestAgentConfigDefaults(t *testing.T) { +func (suite *ConfigTestSuite) TestAgentConfigDefaults() { + t := suite.T() fileName := "testdata/config_default.yaml" c, err := NewConfigComponent(context.Background(), "", []string{fileName}) if err != nil { @@ -58,14 +75,131 @@ func TestAgentConfigDefaults(t *testing.T) { assert.Equal(t, []string{"enable_otlp_compute_top_level_by_span_kind"}, c.Get("apm_config.features")) } -func TestNoDDExporter(t *testing.T) { +func (suite *ConfigTestSuite) TestAgentConfigWithDatadogYamlDefaults() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog.yaml" + c, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + + // all expected defaults + assert.Equal(t, "DATADOG_API_KEY", c.Get("api_key")) + assert.Equal(t, "datadoghq.com", c.Get("site")) + assert.Equal(t, "https://api.datadoghq.com", c.Get("dd_url")) + assert.Equal(t, true, c.Get("logs_enabled")) + assert.Equal(t, "https://agent-http-intake.logs.datadoghq.com", c.Get("logs_config.logs_dd_url")) + assert.Equal(t, 5, c.Get("logs_config.batch_wait")) + assert.Equal(t, true, c.Get("logs_config.use_compression")) + assert.Equal(t, true, c.Get("logs_config.force_use_http")) + assert.Equal(t, 6, c.Get("logs_config.compression_level")) + assert.Equal(t, "https://trace.agent.datadoghq.com", c.Get("apm_config.apm_dd_url")) + assert.Equal(t, false, c.Get("apm_config.receiver_enabled")) + assert.Equal(t, true, 
c.Get("otlp_config.traces.span_name_as_resource_name")) + assert.Equal(t, []string{"enable_otlp_compute_top_level_by_span_kind"}, c.Get("apm_config.features")) + + // log_level from datadog.yaml takes precedence -> more verbose + assert.Equal(t, "debug", c.Get("log_level")) +} + +func (suite *ConfigTestSuite) TestAgentConfigWithDatadogYamlKeysAvailable() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog.yaml" + c, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + + // log_level from datadog.yaml takes precedence -> more verbose + assert.Equal(t, "debug", c.Get("log_level")) + assert.True(t, c.GetBool("otelcollector.enabled")) + assert.Equal(t, "https://localhost:7777", c.GetString("otelcollector.extension_url")) + assert.Equal(t, 5009, c.GetInt("agent_ipc.port")) + assert.Equal(t, 60, c.GetInt("agent_ipc.config_refresh_interval")) +} + +func (suite *ConfigTestSuite) TestLogLevelPrecedence() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog_low_log_level.yaml" + c, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + + // log_level from service config takes precedence -> more verbose + // ddFlleName configures level warn, Telemetry defaults to info + assert.Equal(t, "info", c.Get("log_level")) +} + +func (suite *ConfigTestSuite) TestEnvLogLevelPrecedence() { + t := suite.T() + oldval, exists := os.LookupEnv("DD_LOG_LEVEL") + os.Setenv("DD_LOG_LEVEL", "debug") + defer func() { + if !exists { + os.Unsetenv("DD_LOG_LEVEL") + } else { + os.Setenv("DD_LOG_LEVEL", oldval) + } + }() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog_low_log_level.yaml" + c, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + if err != 
nil { + t.Errorf("Failed to load agent config: %v", err) + } + + // log_level from service config takes precedence -> more verbose + // ddFlleName configures level warn, Telemetry defaults to info, env sets debug + assert.Equal(t, "debug", c.Get("log_level")) +} + +func (suite *ConfigTestSuite) TestEnvBadLogLevel() { + t := suite.T() + oldval, exists := os.LookupEnv("DD_LOG_LEVEL") + os.Setenv("DD_LOG_LEVEL", "yabadabadooo") + defer func() { + if !exists { + os.Unsetenv("DD_LOG_LEVEL") + } else { + os.Setenv("DD_LOG_LEVEL", oldval) + } + }() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog_low_log_level.yaml" + _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + assert.Error(t, err) +} + +func (suite *ConfigTestSuite) TestBadLogLevel() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog_bad_log_level.yaml" + _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + + // log_level from service config takes precedence -> more verbose + // ddFlleName configures level warn, Telemetry defaults to info + assert.Error(t, err) +} + +func (suite *ConfigTestSuite) TestNoDDExporter() { + t := suite.T() fileName := "testdata/config_no_dd_exporter.yaml" _, err := NewConfigComponent(context.Background(), "", []string{fileName}) assert.EqualError(t, err, "no datadog exporter found") } -func TestMultipleDDExporters(t *testing.T) { +func (suite *ConfigTestSuite) TestMultipleDDExporters() { + t := suite.T() fileName := "testdata/config_multiple_dd_exporters.yaml" _, err := NewConfigComponent(context.Background(), "", []string{fileName}) assert.EqualError(t, err, "multiple datadog exporters found") } + +// TestSuite runs the CalculatorTestSuite +func TestSuite(t *testing.T) { + suite.Run(t, new(ConfigTestSuite)) +} diff --git a/cmd/otel-agent/config/testdata/datadog.yaml b/cmd/otel-agent/config/testdata/datadog.yaml new file mode 100644 index 
0000000000000..a20be007070d6 --- /dev/null +++ b/cmd/otel-agent/config/testdata/datadog.yaml @@ -0,0 +1,12 @@ +api_key: deadbeef + +log_level: debug + +otelcollector: + enabled: true + extension_url: "https://localhost:7777" + +agent_ipc: + port: 5009 + config_refresh_interval: 60 + diff --git a/cmd/otel-agent/config/testdata/datadog_low_log_level.yaml b/cmd/otel-agent/config/testdata/datadog_low_log_level.yaml new file mode 100644 index 0000000000000..a52070a0dbe24 --- /dev/null +++ b/cmd/otel-agent/config/testdata/datadog_low_log_level.yaml @@ -0,0 +1,10 @@ +log_level: warn + +otelcollector: + enabled: true + extension_url: "https://localhost:7777" + +agent_ipc: + port: 5009 + config_refresh_interval: 60 + diff --git a/cmd/otel-agent/subcommands/run/command.go b/cmd/otel-agent/subcommands/run/command.go index 5fef050e1945d..2373bc4453363 100644 --- a/cmd/otel-agent/subcommands/run/command.go +++ b/cmd/otel-agent/subcommands/run/command.go @@ -31,7 +31,6 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" - "github.com/DataDog/datadog-agent/comp/forwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" @@ -94,7 +93,7 @@ func (o *orchestratorinterfaceimpl) Reset() { func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, opts ...fx.Option) error { err := fxutil.Run( - forwarder.BundleWithProvider(newForwarderParams), + ForwarderBundle(), logtracefx.Module(), inventoryagentimpl.Module(), fx.Supply(metricsclient.NewStatsdClientWrapper(&ddgostatsd.NoOpClient{})), @@ -150,13 +149,6 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, return hn, nil }), - 
fx.Provide(func(c coreconfig.Component, l log.Component) forwarderDeps { - return forwarderDeps{ - config: c, - log: l, - } - }), - fx.Provide(func(c defaultforwarder.Component) (defaultforwarder.Forwarder, error) { return defaultforwarder.Forwarder(c), nil }), @@ -207,16 +199,15 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, return nil } +// ForwarderBundle returns the fx.Option for the forwarder bundle. // TODO: cleanup the forwarder instantiation with fx. // This is a bit of a hack because we need to enforce optional.Option[configsync.Component] // is passed to newForwarder to enforce the correct instantiation order. Currently, the // new forwarder.BundleWithProvider makes a few assumptions in its generic prototype, and // this is the current workaround to leverage it. -type forwarderDeps struct { - config coreconfig.Component - log log.Component -} - -func newForwarderParams(f forwarderDeps, _ optional.Option[configsync.Component]) defaultforwarder.Params { - return defaultforwarder.NewParams(f.config, f.log) +func ForwarderBundle() fx.Option { + return defaultforwarder.ModulWithOptionTMP( + fx.Provide(func(_ optional.Option[configsync.Component]) defaultforwarder.Params { + return defaultforwarder.NewParams() + })) } diff --git a/cmd/security-agent/subcommands/runtime/command_test.go b/cmd/security-agent/subcommands/runtime/command_test.go index e9ce8ae681cd4..d769d0b0dcc60 100644 --- a/cmd/security-agent/subcommands/runtime/command_test.go +++ b/cmd/security-agent/subcommands/runtime/command_test.go @@ -9,7 +9,6 @@ package runtime import ( "bytes" - "math" "testing" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" @@ -58,13 +57,11 @@ func newMockRSClient(t *testing.T) secagent.SecurityModuleClientWrapper { { EventType: "exec", Mode: 1, - Flags: math.MaxUint8, Approvers: nil, }, { EventType: "open", Mode: 2, - Flags: math.MaxUint8, Approvers: &api.Approvers{ ApproverDetails: []*api.ApproverDetails{ { @@ -114,20 
+111,10 @@ func Test_checkPoliciesLoaded(t *testing.T) { "Policies": { "exec": { "Mode": "accept", - "Flags": [ - "basename", - "flags", - "mode" - ], "Approvers": null }, "open": { "Mode": "deny", - "Flags": [ - "basename", - "flags", - "mode" - ], "Approvers": { "open.file.path": [ { diff --git a/cmd/serverless-init/log/log.go b/cmd/serverless-init/log/log.go index 2505a76aa68f8..1e1d42eb0ce87 100644 --- a/cmd/serverless-init/log/log.go +++ b/cmd/serverless-init/log/log.go @@ -9,13 +9,14 @@ package log import ( - "github.com/DataDog/datadog-agent/pkg/logs/sources" "os" "strings" "time" + "github.com/DataDog/datadog-agent/comp/core/tagger" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + "github.com/DataDog/datadog-agent/pkg/logs/sources" serverlessLogs "github.com/DataDog/datadog-agent/pkg/serverless/logs" serverlessTag "github.com/DataDog/datadog-agent/pkg/serverless/tags" ) @@ -52,8 +53,8 @@ func CreateConfig(origin string) *Config { } // SetupLogAgent creates the log agent and sets the base tags -func SetupLogAgent(conf *Config, tags map[string]string) logsAgent.ServerlessLogsAgent { - logsAgent, _ := serverlessLogs.SetupLogAgent(conf.Channel, sourceName, conf.source) +func SetupLogAgent(conf *Config, tags map[string]string, tagger tagger.Component) logsAgent.ServerlessLogsAgent { + logsAgent, _ := serverlessLogs.SetupLogAgent(conf.Channel, sourceName, conf.source, tagger) tagsArray := serverlessTag.MapToArray(tags) diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index 00516dcef3466..82493f7f9b5a6 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -96,8 +96,8 @@ func main() { } // removing these unused dependencies will cause silent crash due to fx framework -func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Component) error { - cloudService, logConfig, traceAgent, metricAgent, logsAgent := setup(modeConf) 
+func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Component, tagger tagger.Component) error { + cloudService, logConfig, traceAgent, metricAgent, logsAgent := setup(modeConf, tagger) err := modeConf.Runner(logConfig) @@ -107,7 +107,7 @@ func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Compon return err } -func setup(mode.Conf) (cloudservice.CloudService, *serverlessInitLog.Config, trace.ServerlessTraceAgent, *metrics.ServerlessMetricAgent, logsAgent.ServerlessLogsAgent) { +func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *serverlessInitLog.Config, trace.ServerlessTraceAgent, *metrics.ServerlessMetricAgent, logsAgent.ServerlessLogsAgent) { tracelog.SetLogger(corelogger{}) // load proxy settings @@ -138,7 +138,7 @@ func setup(mode.Conf) (cloudservice.CloudService, *serverlessInitLog.Config, tra if err != nil { log.Debugf("Error loading config: %v\n", err) } - logsAgent := serverlessInitLog.SetupLogAgent(agentLogConfig, tags) + logsAgent := serverlessInitLog.SetupLogAgent(agentLogConfig, tags, tagger) traceAgent := setupTraceAgent(tags) diff --git a/cmd/serverless-init/main_test.go b/cmd/serverless-init/main_test.go index 1c3a758f685ee..4d0c534ce328d 100644 --- a/cmd/serverless-init/main_test.go +++ b/cmd/serverless-init/main_test.go @@ -17,6 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/cmd/serverless-init/mode" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/serverless/logs" @@ -27,6 +28,9 @@ func TestTagsSetup(t *testing.T) { // TODO: Fix and re-enable flaky test t.Skip() + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + configmock.New(t) ddTagsEnv := "key1:value1 key2:value2 key3:value3:4" @@ -38,7 +42,7 @@ func TestTagsSetup(t *testing.T) 
{ allTags := append(ddTags, ddExtraTags...) - _, _, traceAgent, metricAgent, _ := setup(mode.Conf{}) + _, _, traceAgent, metricAgent, _ := setup(mode.Conf{}, fakeTagger) defer traceAgent.Stop() defer metricAgent.Stop() assert.Subset(t, metricAgent.GetExtraTags(), allTags) diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index 3a31177623a64..6059520efe6e5 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -77,6 +77,7 @@ github.com/DataDog/datadog-agent/comp/core/log/impl github.com/DataDog/datadog-agent/comp/core/secrets github.com/DataDog/datadog-agent/comp/core/status github.com/DataDog/datadog-agent/comp/core/tagger +github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl github.com/DataDog/datadog-agent/comp/core/tagger/telemetry github.com/DataDog/datadog-agent/comp/core/tagger/types github.com/DataDog/datadog-agent/comp/core/tagger/utils @@ -260,6 +261,7 @@ github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce github.com/DataDog/datadog-agent/pkg/util/clusteragent github.com/DataDog/datadog-agent/pkg/util/common github.com/DataDog/datadog-agent/pkg/util/containers +github.com/DataDog/datadog-agent/pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider github.com/DataDog/datadog-agent/pkg/util/dmi github.com/DataDog/datadog-agent/pkg/util/docker @@ -291,6 +293,7 @@ github.com/DataDog/datadog-agent/pkg/util/statstracker github.com/DataDog/datadog-agent/pkg/util/sync github.com/DataDog/datadog-agent/pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket +github.com/DataDog/datadog-agent/pkg/util/tagger github.com/DataDog/datadog-agent/pkg/util/tmplvar github.com/DataDog/datadog-agent/pkg/version github.com/DataDog/datadog-api-client-go/v2 @@ -844,7 +847,9 @@ google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip 
google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff @@ -867,11 +872,13 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index 8cb3beccdeee6..f5373b770a685 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -77,6 +77,7 @@ github.com/DataDog/datadog-agent/comp/core/log/impl github.com/DataDog/datadog-agent/comp/core/secrets github.com/DataDog/datadog-agent/comp/core/status github.com/DataDog/datadog-agent/comp/core/tagger +github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl github.com/DataDog/datadog-agent/comp/core/tagger/telemetry github.com/DataDog/datadog-agent/comp/core/tagger/types github.com/DataDog/datadog-agent/comp/core/tagger/utils @@ -260,6 +261,7 @@ github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce github.com/DataDog/datadog-agent/pkg/util/clusteragent github.com/DataDog/datadog-agent/pkg/util/common github.com/DataDog/datadog-agent/pkg/util/containers +github.com/DataDog/datadog-agent/pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider github.com/DataDog/datadog-agent/pkg/util/dmi 
github.com/DataDog/datadog-agent/pkg/util/docker @@ -291,6 +293,7 @@ github.com/DataDog/datadog-agent/pkg/util/statstracker github.com/DataDog/datadog-agent/pkg/util/sync github.com/DataDog/datadog-agent/pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket +github.com/DataDog/datadog-agent/pkg/util/tagger github.com/DataDog/datadog-agent/pkg/util/tmplvar github.com/DataDog/datadog-agent/pkg/version github.com/DataDog/datadog-api-client-go/v2 @@ -843,7 +846,9 @@ google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff @@ -866,11 +871,13 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index c1be0ce2f8edb..122368a28d675 100644 --- a/cmd/serverless/main.go +++ b/cmd/serverless/main.go @@ -15,6 +15,8 @@ import ( "syscall" "time" + "github.com/DataDog/datadog-agent/comp/core/tagger" + taggernoop "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -69,7 +71,10 @@ const ( func main() { // 
run the agent - err := fxutil.OneShot(runAgent) + err := fxutil.OneShot( + runAgent, + taggernoop.Module(), + ) if err != nil { log.Error(err) @@ -77,7 +82,7 @@ func main() { } } -func runAgent() { +func runAgent(tagger tagger.Component) { startTime := time.Now() setupLambdaAgentOverrides() @@ -113,7 +118,7 @@ func runAgent() { go startTraceAgent(&wg, lambdaSpanChan, coldStartSpanId, serverlessDaemon) go startOtlpAgent(&wg, metricAgent, serverlessDaemon) - go startTelemetryCollection(&wg, serverlessID, logChannel, serverlessDaemon) + go startTelemetryCollection(&wg, serverlessID, logChannel, serverlessDaemon, tagger) // start appsec appsecProxyProcessor := startAppSec(serverlessDaemon) @@ -284,7 +289,7 @@ func startAppSec(serverlessDaemon *daemon.Daemon) *httpsec.ProxyLifecycleProcess return appsecProxyProcessor } -func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, logChannel chan *logConfig.ChannelMessage, serverlessDaemon *daemon.Daemon) { +func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, logChannel chan *logConfig.ChannelMessage, serverlessDaemon *daemon.Daemon, tagger tagger.Component) { defer wg.Done() if os.Getenv(daemon.LocalTestEnvVar) == "true" || os.Getenv(daemon.LocalTestEnvVar) == "1" { log.Debug("Running in local test mode. 
Telemetry collection HTTP route won't be enabled") @@ -308,7 +313,7 @@ func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, if logRegistrationError != nil { log.Error("Can't subscribe to logs:", logRegistrationError) } else { - logsAgent, err := serverlessLogs.SetupLogAgent(logChannel, "AWS Logs", "lambda") + logsAgent, err := serverlessLogs.SetupLogAgent(logChannel, "AWS Logs", "lambda", tagger) if err != nil { log.Errorf("Error setting up the logs agent: %s", err) } diff --git a/cmd/system-probe/api/module/common.go b/cmd/system-probe/api/module/common.go index 951d9d63bd8f8..0e1b156553fd7 100644 --- a/cmd/system-probe/api/module/common.go +++ b/cmd/system-probe/api/module/common.go @@ -8,6 +8,10 @@ package module import ( "errors" + + "github.com/DataDog/datadog-agent/comp/core/telemetry" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "go.uber.org/fx" ) // ErrNotEnabled is a special error type that should be returned by a Factory @@ -20,3 +24,11 @@ type Module interface { Register(*Router) error Close() } + +// FactoryDependencies defines the fx dependencies for a module factory +type FactoryDependencies struct { + fx.In + + WMeta workloadmeta.Component + Telemetry telemetry.Component +} diff --git a/cmd/system-probe/api/module/factory_linux.go b/cmd/system-probe/api/module/factory_linux.go index b70e452414f26..656a9536b9105 100644 --- a/cmd/system-probe/api/module/factory_linux.go +++ b/cmd/system-probe/api/module/factory_linux.go @@ -9,14 +9,12 @@ package module import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" ) // Factory encapsulates the initialization of a Module type Factory struct { Name sysconfigtypes.ModuleName ConfigNamespaces []string - Fn func(cfg *sysconfigtypes.Config, wmeta workloadmeta.Component, telemetry 
telemetry.Component) (Module, error) + Fn func(cfg *sysconfigtypes.Config, deps FactoryDependencies) (Module, error) NeedsEBPF func() bool } diff --git a/cmd/system-probe/api/module/factory_others.go b/cmd/system-probe/api/module/factory_others.go index 5f5f43f321147..c8c0bae1de724 100644 --- a/cmd/system-probe/api/module/factory_others.go +++ b/cmd/system-probe/api/module/factory_others.go @@ -9,13 +9,11 @@ package module import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" ) // Factory encapsulates the initialization of a Module type Factory struct { Name sysconfigtypes.ModuleName ConfigNamespaces []string - Fn func(cfg *sysconfigtypes.Config, wmeta workloadmeta.Component, telemetry telemetry.Component) (Module, error) + Fn func(cfg *sysconfigtypes.Config, deps FactoryDependencies) (Module, error) } diff --git a/cmd/system-probe/api/module/loader.go b/cmd/system-probe/api/module/loader.go index 610c2796cd0ad..51048f9d63a0e 100644 --- a/cmd/system-probe/api/module/loader.go +++ b/cmd/system-probe/api/module/loader.go @@ -81,7 +81,11 @@ func Register(cfg *sysconfigtypes.Config, httpMux *mux.Router, factories []Facto var err error var module Module withModule(factory.Name, func() { - module, err = factory.Fn(cfg, wmeta, telemetry) + deps := FactoryDependencies{ + WMeta: wmeta, + Telemetry: telemetry, + } + module, err = factory.Fn(cfg, deps) }) // In case a module failed to be started, do not make the whole `system-probe` abort. 
@@ -156,7 +160,11 @@ func RestartModule(factory Factory, wmeta workloadmeta.Component, telemetry tele var err error withModule(factory.Name, func() { currentModule.Close() - newModule, err = factory.Fn(l.cfg, wmeta, telemetry) + deps := FactoryDependencies{ + WMeta: wmeta, + Telemetry: telemetry, + } + newModule, err = factory.Fn(l.cfg, deps) }) if err != nil { l.errors[factory.Name] = err diff --git a/cmd/system-probe/modules/compliance.go b/cmd/system-probe/modules/compliance.go index 319120435942a..8d05cfa141af9 100644 --- a/cmd/system-probe/modules/compliance.go +++ b/cmd/system-probe/modules/compliance.go @@ -19,8 +19,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/compliance/dbconfig" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -36,7 +34,7 @@ import ( var ComplianceModule = module.Factory{ Name: config.ComplianceModule, ConfigNamespaces: []string{}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { return &complianceModule{}, nil }, NeedsEBPF: func() bool { diff --git a/cmd/system-probe/modules/crashdetect_windows.go b/cmd/system-probe/modules/crashdetect_windows.go index 67f1245a8a826..a2694d1147627 100644 --- a/cmd/system-probe/modules/crashdetect_windows.go +++ b/cmd/system-probe/modules/crashdetect_windows.go @@ -15,8 +15,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - 
"github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/wincrashdetect/probe" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -25,7 +23,7 @@ import ( var WinCrashProbe = module.Factory{ Name: config.WindowsCrashDetectModule, ConfigNamespaces: []string{"windows_crash_detection"}, - Fn: func(cfg *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(cfg *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { log.Infof("Starting the WinCrashProbe probe") cp, err := probe.NewWinCrashProbe(cfg) if err != nil { diff --git a/cmd/system-probe/modules/dynamic_instrumentation.go b/cmd/system-probe/modules/dynamic_instrumentation.go index 98eec05108e8f..7ff8d7d48ba6d 100644 --- a/cmd/system-probe/modules/dynamic_instrumentation.go +++ b/cmd/system-probe/modules/dynamic_instrumentation.go @@ -14,8 +14,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation" "github.com/DataDog/datadog-agent/pkg/ebpf" ) @@ -24,7 +22,7 @@ import ( var DynamicInstrumentation = module.Factory{ Name: config.DynamicInstrumentationModule, ConfigNamespaces: []string{}, - Fn: func(agentConfiguration *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(agentConfiguration *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { config, err := dynamicinstrumentation.NewConfig(agentConfiguration) if err != nil { return nil, fmt.Errorf("invalid dynamic 
instrumentation module configuration: %w", err) diff --git a/cmd/system-probe/modules/ebpf.go b/cmd/system-probe/modules/ebpf.go index d71a1dbfec6bb..6ca7a5b77f4f3 100644 --- a/cmd/system-probe/modules/ebpf.go +++ b/cmd/system-probe/modules/ebpf.go @@ -18,8 +18,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -29,7 +27,7 @@ import ( var EBPFProbe = module.Factory{ Name: config.EBPFModule, ConfigNamespaces: []string{}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { log.Infof("Starting the ebpf probe") okp, err := ebpfcheck.NewProbe(ebpf.NewConfig()) if err != nil { diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go index 4bab66647d039..cec624ea3f746 100644 --- a/cmd/system-probe/modules/eventmonitor.go +++ b/cmd/system-probe/modules/eventmonitor.go @@ -10,8 +10,6 @@ package modules import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/eventmonitor" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" netconfig "github.com/DataDog/datadog-agent/pkg/network/config" @@ -24,7 +22,7 @@ import ( var 
eventMonitorModuleConfigNamespaces = []string{"event_monitoring_config", "runtime_security_config"} -func createEventMonitorModule(_ *sysconfigtypes.Config, wmeta workloadmeta.Component, telemetry telemetry.Component) (module.Module, error) { +func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { emconfig := emconfig.NewConfig() secconfig, err := secconfig.NewConfig() @@ -43,7 +41,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, wmeta workloadmeta.Compo secmodule.DisableRuntimeSecurity(secconfig) } - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, wmeta, telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, deps.WMeta, deps.Telemetry) if err != nil { log.Errorf("error initializing event monitoring module: %v", err) return nil, module.ErrNotEnabled diff --git a/cmd/system-probe/modules/language_detection.go b/cmd/system-probe/modules/language_detection.go index 04567200d5de1..7c5546ade6c35 100644 --- a/cmd/system-probe/modules/language_detection.go +++ b/cmd/system-probe/modules/language_detection.go @@ -17,8 +17,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" languageDetectionProto "github.com/DataDog/datadog-agent/pkg/proto/pbgo/languagedetection" @@ -29,7 +27,7 @@ import ( var LanguageDetectionModule = module.Factory{ Name: config.LanguageDetectionModule, ConfigNamespaces: []string{"language_detection"}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { 
+ Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { return &languageDetectionModule{ languageDetector: privileged.NewLanguageDetector(), }, nil diff --git a/cmd/system-probe/modules/network_tracer.go b/cmd/system-probe/modules/network_tracer.go index 85a238142334d..4852575b36a75 100644 --- a/cmd/system-probe/modules/network_tracer.go +++ b/cmd/system-probe/modules/network_tracer.go @@ -23,8 +23,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" networkconfig "github.com/DataDog/datadog-agent/pkg/network/config" @@ -48,7 +46,7 @@ const inactivityRestartDuration = 20 * time.Minute var networkTracerModuleConfigNamespaces = []string{"network_config", "service_monitoring_config"} -func createNetworkTracerModule(cfg *sysconfigtypes.Config, _ workloadmeta.Component, telemetryComponent telemetryComponent.Component) (module.Module, error) { +func createNetworkTracerModule(cfg *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { ncfg := networkconfig.New() // Checking whether the current OS + kernel version is supported by the tracer @@ -63,7 +61,7 @@ func createNetworkTracerModule(cfg *sysconfigtypes.Config, _ workloadmeta.Compon log.Info("enabling universal service monitoring (USM)") } - t, err := tracer.NewTracer(ncfg, telemetryComponent) + t, err := tracer.NewTracer(ncfg, deps.Telemetry) done := make(chan struct{}) if err == nil { diff --git a/cmd/system-probe/modules/oom_kill_probe.go b/cmd/system-probe/modules/oom_kill_probe.go index 74b8b868b0ac4..10ac4211a69ed 100644 --- 
a/cmd/system-probe/modules/oom_kill_probe.go +++ b/cmd/system-probe/modules/oom_kill_probe.go @@ -18,8 +18,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/oomkill" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -29,7 +27,7 @@ import ( var OOMKillProbe = module.Factory{ Name: config.OOMKillProbeModule, ConfigNamespaces: []string{}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { log.Infof("Starting the OOM Kill probe") okp, err := oomkill.NewProbe(ebpf.NewConfig()) if err != nil { diff --git a/cmd/system-probe/modules/ping.go b/cmd/system-probe/modules/ping.go index 02d8e1ba5ce60..1d673108f405f 100644 --- a/cmd/system-probe/modules/ping.go +++ b/cmd/system-probe/modules/ping.go @@ -19,8 +19,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" pingcheck "github.com/DataDog/datadog-agent/pkg/networkdevice/pinger" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,7 +35,7 @@ type pinger struct{} var Pinger = module.Factory{ Name: config.PingModule, ConfigNamespaces: []string{"ping"}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ 
*sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { return &pinger{}, nil }, NeedsEBPF: func() bool { diff --git a/cmd/system-probe/modules/process.go b/cmd/system-probe/modules/process.go index 7ffbaad6e4bff..502d05fae3483 100644 --- a/cmd/system-probe/modules/process.go +++ b/cmd/system-probe/modules/process.go @@ -20,8 +20,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" //nolint:revive // TODO(PROC) Fix revive linter sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" @@ -38,7 +36,7 @@ var ErrProcessUnsupported = errors.New("process module unsupported") var Process = module.Factory{ Name: config.ProcessModule, ConfigNamespaces: []string{}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { log.Infof("Creating process module for: %s", filepath.Base(os.Args[0])) // we disable returning zero values for stats to reduce parsing work on process-agent side diff --git a/cmd/system-probe/modules/tcp_queue_tracer.go b/cmd/system-probe/modules/tcp_queue_tracer.go index 0b5a6fc14b8ca..1b226e6b31485 100644 --- a/cmd/system-probe/modules/tcp_queue_tracer.go +++ b/cmd/system-probe/modules/tcp_queue_tracer.go @@ -18,8 +18,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/tcpqueuelength" "github.com/DataDog/datadog-agent/pkg/ebpf" ) @@ -28,7 +26,7 @@ import ( var TCPQueueLength = module.Factory{ Name: config.TCPQueueLengthTracerModule, ConfigNamespaces: []string{}, - Fn: func(_ *sysconfigtypes.Config, _ workloadmeta.Component, _ telemetry.Component) (module.Module, error) { + Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { t, err := tcpqueuelength.NewTracer(ebpf.NewConfig()) if err != nil { return nil, fmt.Errorf("unable to start the TCP queue length tracer: %w", err) diff --git a/cmd/system-probe/modules/traceroute.go b/cmd/system-probe/modules/traceroute.go index 117718f577ee2..8314fe4dce396 100644 --- a/cmd/system-probe/modules/traceroute.go +++ b/cmd/system-probe/modules/traceroute.go @@ -21,8 +21,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" tracerouteutil "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,8 +36,8 @@ var ( tracerouteConfigNamespaces = []string{"traceroute"} ) -func createTracerouteModule(_ *sysconfigtypes.Config, _ workloadmeta.Component, telemetry telemetry.Component) (module.Module, error) { - runner, err := tracerouteutil.NewRunner(telemetry) +func createTracerouteModule(_ *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { + runner, err := tracerouteutil.NewRunner(deps.Telemetry) if err != nil { return &traceroute{}, err } diff --git a/cmd/system-probe/subcommands/runtime/command_test.go b/cmd/system-probe/subcommands/runtime/command_test.go index da026fe89e4e2..1555bd10577ee 100644 --- 
a/cmd/system-probe/subcommands/runtime/command_test.go +++ b/cmd/system-probe/subcommands/runtime/command_test.go @@ -9,7 +9,6 @@ package runtime import ( "bytes" - "math" "testing" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" @@ -58,13 +57,11 @@ func newMockRSClient(t *testing.T) secagent.SecurityModuleClientWrapper { { EventType: "exec", Mode: 1, - Flags: math.MaxUint8, Approvers: nil, }, { EventType: "open", Mode: 2, - Flags: math.MaxUint8, Approvers: &api.Approvers{ ApproverDetails: []*api.ApproverDetails{ { @@ -114,20 +111,10 @@ func Test_checkPoliciesLoaded(t *testing.T) { "Policies": { "exec": { "Mode": "accept", - "Flags": [ - "basename", - "flags", - "mode" - ], "Approvers": null }, "open": { "Mode": "deny", - "Flags": [ - "basename", - "flags", - "mode" - ], "Approvers": { "open.file.path": [ { diff --git a/comp/core/agenttelemetry/impl/agenttelemetry_test.go b/comp/core/agenttelemetry/impl/agenttelemetry_test.go index 3e5f72d0540fa..a47686d674389 100644 --- a/comp/core/agenttelemetry/impl/agenttelemetry_test.go +++ b/comp/core/agenttelemetry/impl/agenttelemetry_test.go @@ -103,6 +103,7 @@ func convertYamlStrToMap(t *testing.T, cfgStr string) map[string]any { var c map[string]any err := yaml.Unmarshal([]byte(cfgStr), &c) assert.NoError(t, err) + assert.NotNil(t, c) return c } @@ -130,7 +131,11 @@ func makeStableMetricMap(metrics []*dto.Metric) map[string]*dto.Metric { } func makeTelMock(t *testing.T) telemetry.Component { - return fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + // Little hack. Telemetry component is not fully componentized, and relies on global registry so far + // so we need to reset it before running the test. This is not ideal and will be improved in the future. 
+ tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) + tel.Reset() + return tel } func makeCfgMock(t *testing.T, confOverrides map[string]any) config.Component { @@ -146,6 +151,16 @@ func makeStatusMock(t *testing.T) status.Component { return fxutil.Test[status.Mock](t, fx.Options(statusimpl.MockModule())) } +func makeSenderImpl(t *testing.T, c string) sender { + o := convertYamlStrToMap(t, c) + cfg := makeCfgMock(t, o) + log := makeLogMock(t) + client := newClientMock() + sndr, err := newSenderImpl(cfg, log, client) + assert.NoError(t, err) + return sndr +} + // aggregator mock function func getTestAtel(t *testing.T, tel telemetry.Component, @@ -263,11 +278,7 @@ func TestRun(t *testing.T) { } func TestReportMetricBasic(t *testing.T) { - // Little hack. Telemetry component is not fully componentized, and relies on global registry so far - // so we need to reset it before running the test. This is not ideal and will be improved in the future. - // TODO: moved Status and Metric collection to an interface and use a mock for testing - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() + tel := makeTelMock(t) counter := tel.NewCounter("checks", "execution_time", []string{"check_name"}, "") counter.Inc("mycheck") @@ -297,8 +308,7 @@ func TestNoTagSpecifiedAggregationCounter(t *testing.T) { ` // setup and initiate atel - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() + tel := makeTelMock(t) counter := tel.NewCounter("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "") counter.AddWithTags(10, map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}) counter.AddWithTags(20, map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}) @@ -338,8 +348,7 @@ func TestNoTagSpecifiedAggregationGauge(t *testing.T) { ` // setup and initiate atel - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() + tel := makeTelMock(t) gauge := tel.NewGauge("bar", "zoo", []string{"tag1", 
"tag2", "tag3"}, "") gauge.WithTags(map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}).Set(10) gauge.WithTags(map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}).Set(20) @@ -379,9 +388,7 @@ func TestNoTagSpecifiedAggregationHistogram(t *testing.T) { ` // setup and initiate atel - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() - + tel := makeTelMock(t) buckets := []float64{10, 100, 1000, 10000} gauge := tel.NewHistogram("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "", buckets) gauge.WithTags(map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}).Observe(1001) @@ -422,8 +429,7 @@ func TestTagSpecifiedAggregationCounter(t *testing.T) { ` // setup and initiate atel - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() + tel := makeTelMock(t) counter := tel.NewCounter("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "") // should generate 2 timeseries withj tag1:a1, tag1:a2 @@ -471,8 +477,7 @@ func TestTagAggregateTotalCounter(t *testing.T) { - tag1 ` // setup and initiate atel - tel := fxutil.Test[telemetry.Mock](t, telemetryimpl.MockModule()) - tel.Reset() + tel := makeTelMock(t) counter := tel.NewCounter("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "") // should generate 4 timeseries withj tag1:a1, tag1:a2, tag1:a3 and total:6 @@ -517,4 +522,136 @@ func TestTagAggregateTotalCounter(t *testing.T) { assert.Equal(t, float64(210), m4.Counter.GetValue()) } -// TODO: Add more status tests (status mock inspirations are at datadog-agent\comp\core\status\statusimpl\status_test.go) +func TestSenderConfigNoConfig(t *testing.T) { + c := ` + agent_telemetry: + enabled: true + ` + sndr := makeSenderImpl(t, c) + + url := buildURL(sndr.(*senderImpl).endpoints.Main) + assert.Equal(t, "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry", url) +} + +// TestSenderConfigSite tests that the site configuration is correctly used to build the endpoint URL +func 
TestSenderConfigOnlySites(t *testing.T) { + ctemp := ` + site: %s + agent_telemetry: + enabled: true + ` + // Probably overkill (since 2 should be sufficient), but let's test all the sites + tests := []struct { + site string + testURL string + }{ + {"datadoghq.com", "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry"}, + {"datad0g.com", "https://instrumentation-telemetry-intake.datad0g.com/api/v2/apmtelemetry"}, + {"datadoghq.eu", "https://instrumentation-telemetry-intake.datadoghq.eu/api/v2/apmtelemetry"}, + {"us3.datadoghq.com", "https://instrumentation-telemetry-intake.us3.datadoghq.com/api/v2/apmtelemetry"}, + {"us5.datadoghq.com", "https://instrumentation-telemetry-intake.us5.datadoghq.com/api/v2/apmtelemetry"}, + {"ap1.datadoghq.com", "https://instrumentation-telemetry-intake.ap1.datadoghq.com/api/v2/apmtelemetry"}, + } + + for _, tt := range tests { + c := fmt.Sprintf(ctemp, tt.site) + sndr := makeSenderImpl(t, c) + url := buildURL(sndr.(*senderImpl).endpoints.Main) + assert.Equal(t, tt.testURL, url) + } +} + +// TestSenderConfigAdditionalEndpoint tests that the additional endpoint configuration is correctly used to build the endpoint URL +func TestSenderConfigAdditionalEndpoint(t *testing.T) { + c := ` + site: datadoghq.com + api_key: foo + agent_telemetry: + enabled: true + additional_endpoints: + - api_key: bar + host: instrumentation-telemetry-intake.us5.datadoghq.com + ` + sndr := makeSenderImpl(t, c) + assert.NotNil(t, sndr) + + assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 2) + url := buildURL(sndr.(*senderImpl).endpoints.Endpoints[0]) + assert.Equal(t, "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry", url) + url = buildURL(sndr.(*senderImpl).endpoints.Endpoints[1]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us5.datadoghq.com/api/v2/apmtelemetry", url) +} + +// TestSenderConfigPartialDDUrl dd_url overrides alone +func TestSenderConfigPartialDDUrl(t *testing.T) { + c := ` + 
site: datadoghq.com + api_key: foo + agent_telemetry: + enabled: true + dd_url: instrumentation-telemetry-intake.us5.datadoghq.com. + ` + sndr := makeSenderImpl(t, c) + assert.NotNil(t, sndr) + + assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) + url := buildURL(sndr.(*senderImpl).endpoints.Endpoints[0]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us5.datadoghq.com./api/v2/apmtelemetry", url) +} + +// TestSenderConfigFullDDUrl dd_url overrides alone +func TestSenderConfigFullDDUrl(t *testing.T) { + c := ` + site: datadoghq.com + api_key: foo + agent_telemetry: + enabled: true + dd_url: https://instrumentation-telemetry-intake.us5.datadoghq.com. + ` + sndr := makeSenderImpl(t, c) + assert.NotNil(t, sndr) + + assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) + url := buildURL(sndr.(*senderImpl).endpoints.Endpoints[0]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us5.datadoghq.com./api/v2/apmtelemetry", url) +} + +// TestSenderConfigDDUrlWithAdditionalEndpoints dd_url overrides with additional endpoints +func TestSenderConfigDDUrlWithAdditionalEndpoints(t *testing.T) { + c := ` + site: datadoghq.com + api_key: foo + agent_telemetry: + enabled: true + dd_url: instrumentation-telemetry-intake.us5.datadoghq.com. + additional_endpoints: + - api_key: bar + host: instrumentation-telemetry-intake.us3.datadoghq.com. 
+ ` + sndr := makeSenderImpl(t, c) + assert.NotNil(t, sndr) + + assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 2) + url := buildURL(sndr.(*senderImpl).endpoints.Endpoints[0]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us5.datadoghq.com./api/v2/apmtelemetry", url) + url = buildURL(sndr.(*senderImpl).endpoints.Endpoints[1]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us3.datadoghq.com./api/v2/apmtelemetry", url) +} + +// TestSenderConfigDDUrlWithEmptyAdditionalPoint dd_url overrides with empty additional endpoints +func TestSenderConfigDDUrlWithEmptyAdditionalPoint(t *testing.T) { + c := ` + site: datadoghq.com + api_key: foo + agent_telemetry: + enabled: true + dd_url: instrumentation-telemetry-intake.us5.datadoghq.com. + additional_endpoints: + ` + sndr := makeSenderImpl(t, c) + assert.NotNil(t, sndr) + + assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) + url := buildURL(sndr.(*senderImpl).endpoints.Endpoints[0]) + assert.Equal(t, "https://instrumentation-telemetry-intake.us5.datadoghq.com./api/v2/apmtelemetry", url) +} diff --git a/comp/core/agenttelemetry/impl/sender.go b/comp/core/agenttelemetry/impl/sender.go index 1563154e9fb6b..7dc806718c85d 100644 --- a/comp/core/agenttelemetry/impl/sender.go +++ b/comp/core/agenttelemetry/impl/sender.go @@ -15,6 +15,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" "net/url" @@ -25,14 +26,18 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" + logconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" metadatautils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - "github.com/DataDog/datadog-agent/pkg/config/utils" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/version" ) const ( - telemetryEndpointPrefix = "https://instrumentation-telemetry-intake." 
+ telemetryEndpointPrefix = "https://instrumentation-telemetry-intake." + telemetryConfigPrefix = "agent_telemetry." + telemetryHostnameEndpointPrefix = "instrumentation-telemetry-intake." + telemetryIntakeTrackType = "agenttelemetry" + telemetryPath = "/api/v2/apmtelemetry" httpClientResetInterval = 5 * time.Minute httpClientTimeout = 10 * time.Second @@ -60,7 +65,8 @@ type senderImpl struct { client client - endpointURL string + endpoints *logconfig.Endpoints + agentVersion string // pre-fill parts of payload which are not changing during run-time @@ -161,6 +167,31 @@ func newSenderClientImpl(agentCfg config.Component) client { return httputils.NewResetClient(httpClientResetInterval, httpClientFactory(agentCfg, httpClientTimeout)) } +// buils url from a config endpoint. +func buildURL(endpoint logconfig.Endpoint) string { + var address string + if endpoint.Port != 0 { + address = fmt.Sprintf("%v:%v", endpoint.Host, endpoint.Port) + } else { + address = endpoint.Host + } + url := url.URL{ + Scheme: "https", + Host: address, + Path: telemetryPath, + } + + return url.String() +} + +func getEndpoints(cfgComp config.Component) (*logconfig.Endpoints, error) { + // borrowed and styled after EP Forwarder newHTTPPassthroughPipeline(). + // Will be eliminated in the future after switching to EP Forwarder. + configKeys := logconfig.NewLogsConfigKeys(telemetryConfigPrefix, cfgComp) + return logconfig.BuildHTTPEndpointsWithConfig(cfgComp, configKeys, + telemetryHostnameEndpointPrefix, telemetryIntakeTrackType, logconfig.DefaultIntakeProtocol, logconfig.DefaultIntakeOrigin) +} + func newSenderImpl( cfgComp config.Component, logComp log.Component, @@ -168,15 +199,16 @@ func newSenderImpl( // "Sending" part of the sender will be moved to EP Forwarder in the future to be able // to support retry, caching, URL management, API key rotation at runtime, flush to - // disk, backoff logic, etc. 
Specifically, different types of data need to be sent using - // different types of EP Forwarder Pipelines (separate entries for passthroughPipelineDesc - // array). - - // build endpoint URL - endpointHost := utils.GetMainEndpoint(cfgComp, telemetryEndpointPrefix, "agent_telemetry.dd_url") - endpointURL, err := url.JoinPath(endpointHost, "/api/v2/apmtelemetry") + // disk, backoff logic, etc. There are few nuances needs to be adopted by EP Forwarder + // to support Agent Telemetry: + // * Support custom HTTP headers + // * Support indication of main Endpoint selection/filtering if there are more than one + // * Potentially/optionally support custom batching of payloads (custom batching envelope) + // + // When ported to EP Forwarder we will need to send each telemetry type on a separate pipeline. + endpoints, err := getEndpoints(cfgComp) if err != nil { - return nil, fmt.Errorf("failed to form agent telemetry endpoint URL from configuration: %v", err) + return nil, fmt.Errorf("failed to get agent telemetry endpoints: %v", err) } // Get host information (only hostid is used for now) @@ -199,7 +231,7 @@ func newSenderImpl( logComp: logComp, client: client, - endpointURL: endpointURL, + endpoints: endpoints, agentVersion: agentVersion.GetNumberAndPre(), // pre-fill parts of payload which are not changing during run-time payloadTemplate: Payload{ @@ -306,32 +338,36 @@ func (s *senderImpl) flushSession(ss *senderSession) error { return err } - // Send the payload. In the future we want to move this functionality/code into DefaultForwarder - // because it provides retry, caching, URL management, API key rotation at runtime, flush to disk, - // backoff logic etc. 
- req, err := http.NewRequest("POST", s.endpointURL, bytes.NewReader(reqBody)) - if err != nil { - return err - } - s.addHeaders(req, payload.RequestType, s.cfgComp.GetString("api_key"), strconv.Itoa(len(reqBody))) - resp, err := s.client.Do(req.WithContext(ss.cancelCtx)) - if err != nil { - return err - } - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() + // Send the payload to all endpoints + var errs error + for _, ep := range s.endpoints.Endpoints { + url := buildURL(ep) + req, err := http.NewRequest("POST", url, bytes.NewReader(reqBody)) + if err != nil { + errs = errors.Join(errs, err) + continue } - }() + s.addHeaders(req, payload.RequestType, ep.GetAPIKey(), strconv.Itoa(len(reqBody))) + resp, err := s.client.Do(req.WithContext(ss.cancelCtx)) + if err != nil { + errs = errors.Join(errs, err) + continue + } + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() - // Log return status (and URL if unsuccessful) - if resp.StatusCode >= 200 && resp.StatusCode < 300 { - s.logComp.Infof("Telemetery enpoint response status: %s, status code: %d", resp.Status, resp.StatusCode) - } else { - s.logComp.Warnf("Telemetery enpoint response status: %s, status code: %d, url: %s", resp.Status, resp.StatusCode, s.endpointURL) + // Log return status (and URL if unsuccessful) + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + s.logComp.Infof("Telemetery enpoint response status: %s, status code: %d", resp.Status, resp.StatusCode) + } else { + s.logComp.Warnf("Telemetery enpoint response status: %s, status code: %d, url: %s", resp.Status, resp.StatusCode, url) + } } - return nil + return errs } func (s *senderImpl) sendAgentMetricPayloads(ss *senderSession, metrics []*agentmetric) error { diff --git a/comp/core/autodiscovery/listeners/container.go b/comp/core/autodiscovery/listeners/container.go index b0dce0e562106..f7afa5ec80cd5 100644 --- a/comp/core/autodiscovery/listeners/container.go +++ 
b/comp/core/autodiscovery/listeners/container.go @@ -16,9 +16,11 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/utils" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -112,7 +114,7 @@ func (l *ContainerListener) createContainerService(entity workloadmeta.Entity) { svc := &service{ entity: container, - tagsHash: tagger.GetEntityHash(containers.BuildTaggerEntityName(container.ID), tagger.ChecksCardinality()), + tagsHash: tagger.GetEntityHash(types.NewEntityID(types.ContainerID, container.ID).String(), tagger.ChecksCardinality()), adIdentifiers: computeContainerServiceIDs( containers.BuildEntityName(string(container.Runtime), container.ID), containerImg.RawName, @@ -214,7 +216,7 @@ func computeContainerServiceIDs(entity string, image string, labels map[string]s ids := []string{entity} // Add Image names (long then short if different) - long, _, short, _, err := containers.SplitImageName(image) + long, _, short, _, err := pkgcontainersimage.SplitImageName(image) if err != nil { log.Warnf("error while spliting image name: %s", err) } diff --git a/comp/core/autodiscovery/listeners/kubelet.go b/comp/core/autodiscovery/listeners/kubelet.go index 6e81e83df6099..187401ea1ead0 100644 --- a/comp/core/autodiscovery/listeners/kubelet.go +++ b/comp/core/autodiscovery/listeners/kubelet.go @@ -15,6 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/utils" 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -91,9 +93,10 @@ func (l *KubeletListener) createPodService( }) entity := kubelet.PodUIDToEntityName(pod.ID) + taggerEntityID := common.BuildTaggerEntityID(pod.GetID()).String() svc := &service{ entity: pod, - tagsHash: tagger.GetEntityHash(kubelet.PodUIDToTaggerEntityName(pod.ID), tagger.ChecksCardinality()), + tagsHash: tagger.GetEntityHash(taggerEntityID, tagger.ChecksCardinality()), adIdentifiers: []string{entity}, hosts: map[string]string{"pod": pod.IP}, ports: ports, @@ -155,7 +158,7 @@ func (l *KubeletListener) createContainerService( entity := containers.BuildEntityName(string(container.Runtime), container.ID) svc := &service{ entity: container, - tagsHash: tagger.GetEntityHash(containers.BuildTaggerEntityName(container.ID), tagger.ChecksCardinality()), + tagsHash: tagger.GetEntityHash(types.NewEntityID(types.ContainerID, container.ID).String(), tagger.ChecksCardinality()), ready: pod.Ready, ports: ports, extraConfig: map[string]string{ diff --git a/comp/core/autodiscovery/listeners/service.go b/comp/core/autodiscovery/listeners/service.go index 616aec5303d76..f8aada6167d1d 100644 --- a/comp/core/autodiscovery/listeners/service.go +++ b/comp/core/autodiscovery/listeners/service.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/tagger" + taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -88,18 +89,7 @@ func (s *service) GetPorts(_ context.Context) ([]ContainerPort, error) { // GetTags returns the tags associated with the service. func (s *service) GetTags() ([]string, error) { - taggerEntity := "" - switch e := s.entity.(type) { - case *workloadmeta.Container: - taggerEntity = containers.BuildTaggerEntityName(e.ID) - case *workloadmeta.KubernetesPod: - taggerEntity = kubelet.PodUIDToTaggerEntityName(e.ID) - default: - entityID := s.entity.GetID() - log.Errorf("cannot build AD entity ID for kind %q, ID %q", entityID.Kind, entityID.ID) - } - - return tagger.Tag(taggerEntity, tagger.ChecksCardinality()) + return tagger.Tag(taggercommon.BuildTaggerEntityID(s.entity.GetID()).String(), tagger.ChecksCardinality()) } // GetPid returns the process ID of the service. diff --git a/comp/core/tagger/common/entity_id_builder.go b/comp/core/tagger/common/entity_id_builder.go new file mode 100644 index 0000000000000..d46af748cc08b --- /dev/null +++ b/comp/core/tagger/common/entity_id_builder.go @@ -0,0 +1,46 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package common provides common utilities that are useful when interacting with the tagger. 
+package common + +import ( + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// BuildTaggerEntityID builds tagger entity id based on workloadmeta entity id +func BuildTaggerEntityID(entityID workloadmeta.EntityID) types.EntityID { + switch entityID.Kind { + case workloadmeta.KindContainer: + return types.NewEntityID(types.ContainerID, entityID.ID) + case workloadmeta.KindKubernetesPod: + return types.NewEntityID(types.KubernetesPodUID, entityID.ID) + case workloadmeta.KindECSTask: + return types.NewEntityID(types.ECSTask, entityID.ID) + case workloadmeta.KindContainerImageMetadata: + return types.NewEntityID(types.ContainerImageMetadata, entityID.ID) + case workloadmeta.KindProcess: + return types.NewEntityID(types.Process, entityID.ID) + case workloadmeta.KindKubernetesDeployment: + return types.NewEntityID(types.KubernetesDeployment, entityID.ID) + case workloadmeta.KindHost: + return types.NewEntityID(types.Host, entityID.ID) + case workloadmeta.KindKubernetesMetadata: + return types.NewEntityID(types.KubernetesMetadata, entityID.ID) + default: + log.Errorf("can't recognize entity %q with kind %q; trying %s://%s as tagger entity", + entityID.ID, entityID.Kind, entityID.ID, entityID.Kind) + return types.NewEntityID(types.EntityIDPrefix(entityID.Kind), entityID.ID) + } +} + +var globalEntityID = types.NewEntityID("internal", "global-entity-id") + +// GetGlobalEntityID returns the entity ID that holds global tags +func GetGlobalEntityID() types.EntityID { + return globalEntityID +} diff --git a/comp/core/tagger/common/prefixes.go b/comp/core/tagger/common/prefixes.go deleted file mode 100644 index 6988fcb1c8675..0000000000000 --- a/comp/core/tagger/common/prefixes.go +++ /dev/null @@ -1,42 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package common provides common constants and methods for the tagger component and implementation -package common - -import "github.com/DataDog/datadog-agent/comp/core/tagger/types" - -const ( - // ContainerID is the prefix `container_id` - ContainerID types.EntityIDPrefix = "container_id" - // ContainerImageMetadata is the prefix `container_image_metadata` - ContainerImageMetadata types.EntityIDPrefix = "container_image_metadata" - // ECSTask is the prefix `ecs_task` - ECSTask types.EntityIDPrefix = "ecs_task" - // Host is the prefix `host` - Host types.EntityIDPrefix = "host" - // KubernetesDeployment is the prefix `deployment` - KubernetesDeployment types.EntityIDPrefix = "deployment" - // KubernetesMetadata is the prefix `kubernetes_metadata` - KubernetesMetadata types.EntityIDPrefix = "kubernetes_metadata" - // KubernetesPodUID is the prefix `kubernetes_pod_uid` - KubernetesPodUID types.EntityIDPrefix = "kubernetes_pod_uid" - // Process is the prefix `process` - Process types.EntityIDPrefix = "process" -) - -// AllPrefixesSet returns a set of all possible entity id prefixes that can be used in the tagger -func AllPrefixesSet() map[types.EntityIDPrefix]struct{} { - return map[types.EntityIDPrefix]struct{}{ - ContainerID: {}, - ContainerImageMetadata: {}, - ECSTask: {}, - Host: {}, - KubernetesDeployment: {}, - KubernetesMetadata: {}, - KubernetesPodUID: {}, - Process: {}, - } -} diff --git a/comp/core/tagger/component.go b/comp/core/tagger/component.go index b125299bd27bc..8e325161f84ac 100644 --- a/comp/core/tagger/component.go +++ b/comp/core/tagger/component.go @@ -38,14 +38,14 @@ type Component interface { Stop() error ReplayTagger() ReplayTagger GetTaggerTelemetryStore() *telemetry.Store - Tag(entity string, cardinality types.TagCardinality) ([]string, error) - AccumulateTagsFor(entity string, cardinality types.TagCardinality, 
tb tagset.TagsAccumulator) error - Standard(entity string) ([]string, error) + Tag(entityID string, cardinality types.TagCardinality) ([]string, error) + AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error + Standard(entityID string) ([]string, error) List() types.TaggerListResponse GetEntity(entityID string) (*types.Entity, error) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent Unsubscribe(ch chan []types.EntityEvent) - GetEntityHash(entity string, cardinality types.TagCardinality) string + GetEntityHash(entityID string, cardinality types.TagCardinality) string AgentTags(cardinality types.TagCardinality) ([]string, error) GlobalTags(cardinality types.TagCardinality) ([]string, error) SetNewCaptureTagger(newCaptureTagger Component) diff --git a/comp/core/tagger/component_mock.go b/comp/core/tagger/component_mock.go index db5b9f5079ea4..3aa46c5dde5db 100644 --- a/comp/core/tagger/component_mock.go +++ b/comp/core/tagger/component_mock.go @@ -13,7 +13,7 @@ type Mock interface { Component // SetTags allows to set tags in the mock fake tagger - SetTags(entity, source string, low, orch, high, std []string) + SetTags(entityID string, source string, low, orch, high, std []string) // SetGlobalTags allows to set tags in store for the global entity SetGlobalTags(low, orch, high, std []string) diff --git a/comp/core/tagger/global.go b/comp/core/tagger/global.go index 189d259111a92..d807a67edd837 100644 --- a/comp/core/tagger/global.go +++ b/comp/core/tagger/global.go @@ -53,19 +53,19 @@ func Tag(entity string, cardinality types.TagCardinality) ([]string, error) { } // GetEntityHash is an interface function that queries taggerclient singleton -func GetEntityHash(entity string, cardinality types.TagCardinality) string { +func GetEntityHash(entityID string, cardinality types.TagCardinality) string { if globalTagger != nil { - return globalTagger.GetEntityHash(entity, cardinality) + return 
globalTagger.GetEntityHash(entityID, cardinality) } return "" } // StandardTags is an interface function that queries taggerclient singleton -func StandardTags(entity string) ([]string, error) { +func StandardTags(entityID string) ([]string, error) { if globalTagger == nil { return nil, fmt.Errorf("a global tagger must be set before calling StandardTags") } - return globalTagger.Standard(entity) + return globalTagger.Standard(entityID) } // AgentTags is an interface function that queries taggerclient singleton diff --git a/comp/core/tagger/noopimpl/tagger.go b/comp/core/tagger/noopimpl/tagger.go new file mode 100644 index 0000000000000..9563d90fd4000 --- /dev/null +++ b/comp/core/tagger/noopimpl/tagger.go @@ -0,0 +1,110 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package tagger implements the Tagger component. The Tagger is the central +// source of truth for client-side entity tagging. It subscribes to workloadmeta +// to get updates for all the entity kinds (containers, kubernetes pods, +// kubernetes nodes, etc.) and extracts the tags for each of them. Tags are then +// stored in memory (by the TagStore) and can be queried by the tagger.Tag() +// method. + +// Package noopimpl provides a noop implementation for the tagger component +package noopimpl + +import ( + "context" + + "go.uber.org/fx" + + "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" + "github.com/DataDog/datadog-agent/pkg/tagset" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +// Module defines the fx options for this component. 
+func Module() fxutil.Module { + return fxutil.Component( + fx.Provide( + newTaggerClient, + ), + ) +} + +type noopTagger struct{} + +func (n *noopTagger) Start(context.Context) error { + return nil +} + +func (n *noopTagger) Stop() error { + return nil +} + +func (n *noopTagger) ReplayTagger() tagger.ReplayTagger { + return nil +} + +func (n *noopTagger) GetTaggerTelemetryStore() *telemetry.Store { + return nil +} + +func (n *noopTagger) Tag(string, types.TagCardinality) ([]string, error) { + return nil, nil +} + +func (n *noopTagger) AccumulateTagsFor(string, types.TagCardinality, tagset.TagsAccumulator) error { + return nil +} + +func (n *noopTagger) Standard(string) ([]string, error) { + return nil, nil +} + +func (n *noopTagger) List() types.TaggerListResponse { + return types.TaggerListResponse{} +} + +func (n *noopTagger) GetEntity(string) (*types.Entity, error) { + return nil, nil +} + +func (n *noopTagger) Subscribe(types.TagCardinality) chan []types.EntityEvent { + return make(chan []types.EntityEvent) +} + +func (n *noopTagger) Unsubscribe(chan []types.EntityEvent) {} + +func (n *noopTagger) GetEntityHash(string, types.TagCardinality) string { + return "" +} + +func (n *noopTagger) AgentTags(types.TagCardinality) ([]string, error) { + return nil, nil +} + +func (n *noopTagger) GlobalTags(types.TagCardinality) ([]string, error) { + return nil, nil +} + +func (n *noopTagger) SetNewCaptureTagger(tagger.Component) {} + +func (n *noopTagger) ResetCaptureTagger() {} + +func (n *noopTagger) EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo) {} + +func (n *noopTagger) ChecksCardinality() types.TagCardinality { + return types.LowCardinality +} + +func (n *noopTagger) DogstatsdCardinality() types.TagCardinality { + return types.LowCardinality +} + +func newTaggerClient() tagger.Component { + return &noopTagger{} +} diff --git a/comp/core/tagger/proto/proto.go b/comp/core/tagger/proto/proto.go index 139ca918f3a05..0d57f1d1a387b 100644 --- 
a/comp/core/tagger/proto/proto.go +++ b/comp/core/tagger/proto/proto.go @@ -9,7 +9,6 @@ package proto import ( "errors" "fmt" - "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -19,15 +18,11 @@ import ( ) // Tagger2PbEntityID helper to convert an Entity ID to its expected protobuf format. -func Tagger2PbEntityID(entityID string) (*pb.EntityId, error) { - parts := strings.SplitN(entityID, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid entity id %q", entityID) - } +func Tagger2PbEntityID(entityID types.EntityID) (*pb.EntityId, error) { return &pb.EntityId{ - Prefix: parts[0], - Uid: parts[1], + Prefix: string(entityID.GetPrefix()), + Uid: entityID.GetID(), }, nil } @@ -64,12 +59,12 @@ func Tagger2PbEntityEvent(event types.EntityEvent) (*pb.StreamTagsEvent, error) } // Pb2TaggerEntityID helper to convert a protobuf Entity ID to its expected format. -func Pb2TaggerEntityID(entityID *pb.EntityId) (string, error) { +func Pb2TaggerEntityID(entityID *pb.EntityId) (types.EntityID, error) { if entityID == nil { - return "", errors.New("Invalid entityID argument") + return nil, errors.New("Invalid entityID argument") } - return fmt.Sprintf("%s://%s", entityID.Prefix, entityID.Uid), nil + return types.NewEntityID(types.EntityIDPrefix(entityID.Prefix), entityID.Uid), nil } // Pb2TaggerCardinality helper to convert protobuf cardinality to native tag cardinality. 
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go index aa4208829ca7e..d7753c7f495f2 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go @@ -23,9 +23,6 @@ import ( ) const ( - // GlobalEntityID defines the entity ID that holds global tags - GlobalEntityID = "internal://global-entity-id" - podAnnotationPrefix = "ad.datadoghq.com/" podContainerTagsAnnotationFormat = podAnnotationPrefix + "%s.tags" podTagsAnnotation = podAnnotationPrefix + "tags" @@ -123,18 +120,18 @@ func (c *WorkloadMetaCollector) processEvents(evBundle workloadmeta.EventBundle) switch ev.Type { case workloadmeta.EventTypeSet: - taggerEntityID := buildTaggerEntityID(entityID) + taggerEntityID := common.BuildTaggerEntityID(entityID) // keep track of children of this entity from previous // iterations ... - unseen := make(map[string]struct{}) + unseen := make(map[types.EntityID]struct{}) for childTaggerID := range c.children[taggerEntityID] { unseen[childTaggerID] = struct{}{} } // ... and create a new empty map to store the children // seen in this iteration. - c.children[taggerEntityID] = make(map[string]struct{}) + c.children[taggerEntityID] = make(map[types.EntityID]struct{}) switch entityID.Kind { case workloadmeta.KindContainer: @@ -237,7 +234,7 @@ func (c *WorkloadMetaCollector) handleContainer(ev workloadmeta.Event) []*types. 
return []*types.TagInfo{ { Source: containerSource, - Entity: buildTaggerEntityID(container.EntityID), + EntityID: common.BuildTaggerEntityID(container.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -287,7 +284,7 @@ func (c *WorkloadMetaCollector) handleContainerImage(ev workloadmeta.Event) []*t return []*types.TagInfo{ { Source: containerImageSource, - Entity: buildTaggerEntityID(image.EntityID), + EntityID: common.BuildTaggerEntityID(image.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -301,7 +298,7 @@ func (c *WorkloadMetaCollector) handleHostTags(ev workloadmeta.Event) []*types.T return []*types.TagInfo{ { Source: hostSource, - Entity: GlobalEntityID, + EntityID: types.NewEntityID("internal", "global-entity-id"), LowCardTags: hostTags.HostTags, }, } @@ -415,7 +412,7 @@ func (c *WorkloadMetaCollector) handleKubePod(ev workloadmeta.Event) []*types.Ta tagInfos := []*types.TagInfo{ { Source: podSource, - Entity: buildTaggerEntityID(pod.EntityID), + EntityID: common.BuildTaggerEntityID(pod.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -484,7 +481,7 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta // taskSource here is not a mistake. the source is // always from the parent resource. 
Source: taskSource, - Entity: buildTaggerEntityID(container.EntityID), + EntityID: common.BuildTaggerEntityID(container.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -496,7 +493,7 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta low, orch, high, standard := taskTags.Compute() tagInfos = append(tagInfos, &types.TagInfo{ Source: taskSource, - Entity: GlobalEntityID, + EntityID: common.GetGlobalEntityID(), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -511,7 +508,7 @@ func (c *WorkloadMetaCollector) handleGardenContainer(container *workloadmeta.Co return []*types.TagInfo{ { Source: containerSource, - Entity: buildTaggerEntityID(container.EntityID), + EntityID: common.BuildTaggerEntityID(container.EntityID), HighCardTags: container.CollectorTags, }, } @@ -551,7 +548,7 @@ func (c *WorkloadMetaCollector) handleKubeDeployment(ev workloadmeta.Event) []*t tagInfos := []*types.TagInfo{ { Source: deploymentSource, - Entity: buildTaggerEntityID(deployment.EntityID), + EntityID: common.BuildTaggerEntityID(deployment.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -593,7 +590,7 @@ func (c *WorkloadMetaCollector) handleKubeMetadata(ev workloadmeta.Event) []*typ tagInfos := []*types.TagInfo{ { Source: kubeMetadataSource, - Entity: buildTaggerEntityID(kubeMetadata.EntityID), + EntityID: common.BuildTaggerEntityID(kubeMetadata.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -717,7 +714,7 @@ func (c *WorkloadMetaCollector) extractTagsFromPodContainer(pod *workloadmeta.Ku // podSource here is not a mistake. the source is // always from the parent resource. 
Source: podSource, - Entity: buildTaggerEntityID(container.EntityID), + EntityID: common.BuildTaggerEntityID(container.EntityID), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -726,12 +723,12 @@ func (c *WorkloadMetaCollector) extractTagsFromPodContainer(pod *workloadmeta.Ku } func (c *WorkloadMetaCollector) registerChild(parent, child workloadmeta.EntityID) { - parentTaggerEntityID := buildTaggerEntityID(parent) - childTaggerEntityID := buildTaggerEntityID(child) + parentTaggerEntityID := common.BuildTaggerEntityID(parent) + childTaggerEntityID := common.BuildTaggerEntityID(child) m, ok := c.children[parentTaggerEntityID] if !ok { - c.children[parentTaggerEntityID] = make(map[string]struct{}) + c.children[parentTaggerEntityID] = make(map[types.EntityID]struct{}) m = c.children[parentTaggerEntityID] } @@ -740,7 +737,7 @@ func (c *WorkloadMetaCollector) registerChild(parent, child workloadmeta.EntityI func (c *WorkloadMetaCollector) handleDelete(ev workloadmeta.Event) []*types.TagInfo { entityID := ev.Entity.GetID() - taggerEntityID := buildTaggerEntityID(entityID) + taggerEntityID := common.BuildTaggerEntityID(entityID) children := c.children[taggerEntityID] @@ -748,7 +745,7 @@ func (c *WorkloadMetaCollector) handleDelete(ev workloadmeta.Event) []*types.Tag tagInfos := make([]*types.TagInfo, 0, len(children)+1) tagInfos = append(tagInfos, &types.TagInfo{ Source: source, - Entity: taggerEntityID, + EntityID: taggerEntityID, DeleteEntity: true, }) tagInfos = append(tagInfos, c.handleDeleteChildren(source, children)...) 
@@ -758,13 +755,13 @@ func (c *WorkloadMetaCollector) handleDelete(ev workloadmeta.Event) []*types.Tag return tagInfos } -func (c *WorkloadMetaCollector) handleDeleteChildren(source string, children map[string]struct{}) []*types.TagInfo { +func (c *WorkloadMetaCollector) handleDeleteChildren(source string, children map[types.EntityID]struct{}) []*types.TagInfo { tagInfos := make([]*types.TagInfo, 0, len(children)) for childEntityID := range children { t := types.TagInfo{ Source: source, - Entity: childEntityID, + EntityID: childEntityID, DeleteEntity: true, } tagInfos = append(tagInfos, &t) @@ -818,31 +815,6 @@ func (c *WorkloadMetaCollector) addOpenTelemetryStandardTags(container *workload c.extractFromMapWithFn(container.EnvVars, otelStandardEnvKeys, tags.AddStandard) } -func buildTaggerEntityID(entityID workloadmeta.EntityID) string { - switch entityID.Kind { - case workloadmeta.KindContainer: - return common.ContainerID.ToUID(entityID.ID) - case workloadmeta.KindKubernetesPod: - return common.KubernetesPodUID.ToUID(entityID.ID) - case workloadmeta.KindECSTask: - return common.ECSTask.ToUID(entityID.ID) - case workloadmeta.KindContainerImageMetadata: - return common.ContainerImageMetadata.ToUID(entityID.ID) - case workloadmeta.KindProcess: - return common.Process.ToUID(entityID.ID) - case workloadmeta.KindKubernetesDeployment: - return common.KubernetesDeployment.ToUID(entityID.ID) - case workloadmeta.KindHost: - return common.Host.ToUID(entityID.ID) - case workloadmeta.KindKubernetesMetadata: - return common.KubernetesMetadata.ToUID(entityID.ID) - default: - log.Errorf("can't recognize entity %q with kind %q; trying %s://%s as tagger entity", - entityID.ID, entityID.Kind, entityID.ID, entityID.Kind) - return fmt.Sprintf("%s://%s", string(entityID.Kind), entityID.ID) - } -} - func buildTaggerSource(entityID workloadmeta.EntityID) string { return fmt.Sprintf("%s-%s", workloadmetaCollectorName, string(entityID.Kind)) } diff --git 
a/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go index 3103c0bb777a6..fe561a0ca3481 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go @@ -11,11 +11,12 @@ import ( "github.com/gobwas/glob" + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" k8smetadata "github.com/DataDog/datadog-agent/comp/core/tagger/k8s_metadata" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util" @@ -51,7 +52,7 @@ type processor interface { // store. type WorkloadMetaCollector struct { store workloadmeta.Component - children map[string]map[string]struct{} + children map[types.EntityID]map[types.EntityID]struct{} tagProcessor processor containerEnvAsTags map[string]string @@ -119,7 +120,7 @@ func (c *WorkloadMetaCollector) collectStaticGlobalTags(ctx context.Context) { c.tagProcessor.ProcessTagInfo([]*types.TagInfo{ { Source: staticSource, - Entity: GlobalEntityID, + EntityID: common.GetGlobalEntityID(), HighCardTags: high, OrchestratorCardTags: orch, LowCardTags: low, @@ -164,28 +165,28 @@ func (c *WorkloadMetaCollector) stream(ctx context.Context) { } // NewWorkloadMetaCollector returns a new WorkloadMetaCollector. 
-func NewWorkloadMetaCollector(_ context.Context, store workloadmeta.Component, p processor) *WorkloadMetaCollector { +func NewWorkloadMetaCollector(_ context.Context, cfg config.Component, store workloadmeta.Component, p processor) *WorkloadMetaCollector { c := &WorkloadMetaCollector{ tagProcessor: p, store: store, - children: make(map[string]map[string]struct{}), - collectEC2ResourceTags: config.Datadog().GetBool("ecs_collect_resource_tags_ec2"), - collectPersistentVolumeClaimsTags: config.Datadog().GetBool("kubernetes_persistent_volume_claims_as_tags"), + children: make(map[types.EntityID]map[types.EntityID]struct{}), + collectEC2ResourceTags: cfg.GetBool("ecs_collect_resource_tags_ec2"), + collectPersistentVolumeClaimsTags: cfg.GetBool("kubernetes_persistent_volume_claims_as_tags"), } containerLabelsAsTags := mergeMaps( - retrieveMappingFromConfig("docker_labels_as_tags"), - retrieveMappingFromConfig("container_labels_as_tags"), + retrieveMappingFromConfig(cfg, "docker_labels_as_tags"), + retrieveMappingFromConfig(cfg, "container_labels_as_tags"), ) // Adding new environment variables require adding them to pkg/util/containers/env_vars_filter.go containerEnvAsTags := mergeMaps( - retrieveMappingFromConfig("docker_env_as_tags"), - retrieveMappingFromConfig("container_env_as_tags"), + retrieveMappingFromConfig(cfg, "docker_env_as_tags"), + retrieveMappingFromConfig(cfg, "container_env_as_tags"), ) c.initContainerMetaAsTags(containerLabelsAsTags, containerEnvAsTags) // kubernetes resources metadata as tags - metadataAsTags := configutils.GetMetadataAsTags(config.Datadog()) + metadataAsTags := configutils.GetMetadataAsTags(cfg) c.initK8sResourcesMetaAsTags(metadataAsTags.GetResourcesLabelsAsTags(), metadataAsTags.GetResourcesAnnotationsAsTags()) return c @@ -193,13 +194,12 @@ func NewWorkloadMetaCollector(_ context.Context, store workloadmeta.Component, p // retrieveMappingFromConfig gets a stringmapstring config key and // lowercases all map keys to make envvar 
and yaml sources consistent -func retrieveMappingFromConfig(configKey string) map[string]string { - labelsList := config.Datadog().GetStringMapString(configKey) +func retrieveMappingFromConfig(cfg config.Component, configKey string) map[string]string { + labelsList := cfg.GetStringMapString(configKey) for label, value := range labelsList { delete(labelsList, label) labelsList[strings.ToLower(label)] = value } - return labelsList } diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go index 76a8f164dd136..63c2a66adc4af 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go @@ -8,10 +8,10 @@ package collectors import ( "context" "fmt" + "reflect" "sort" "testing" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.uber.org/fx" "k8s.io/apimachinery/pkg/runtime/schema" @@ -19,12 +19,14 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -54,9 +56,9 @@ func TestHandleKubePod(t *testing.T) { ID: "foobar", } - podTaggerEntityID := fmt.Sprintf("kubernetes_pod_uid://%s", podEntityID.ID) - 
fullyFleshedContainerTaggerEntityID := fmt.Sprintf("container_id://%s", fullyFleshedContainerID) - noEnvContainerTaggerEntityID := fmt.Sprintf("container_id://%s", noEnvContainerID) + podTaggerEntityID := types.NewEntityID(types.KubernetesPodUID, podEntityID.ID) + fullyFleshedContainerTaggerEntityID := types.NewEntityID(types.ContainerID, fullyFleshedContainerID) + noEnvContainerTaggerEntityID := types.NewEntityID(types.ContainerID, noEnvContainerID) image := workloadmeta.ContainerImage{ ID: "datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", @@ -222,8 +224,8 @@ func TestHandleKubePod(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: podSource, - Entity: podTaggerEntityID, + Source: podSource, + EntityID: podTaggerEntityID, HighCardTags: []string{ "gitcommit:foobar", }, @@ -288,7 +290,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -301,8 +303,8 @@ func TestHandleKubePod(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: noEnvContainerTaggerEntityID, + Source: podSource, + EntityID: noEnvContainerTaggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_id:%s", noEnvContainerID), fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), @@ -343,7 +345,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -354,8 +356,8 @@ func TestHandleKubePod(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: fullyFleshedContainerTaggerEntityID, + Source: podSource, + EntityID: fullyFleshedContainerTaggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_id:%s", 
fullyFleshedContainerID), fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), @@ -394,7 +396,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -405,8 +407,8 @@ func TestHandleKubePod(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: fmt.Sprintf("container_id://%s", otelEnvContainerID), + Source: podSource, + EntityID: types.NewEntityID(types.ContainerID, otelEnvContainerID), HighCardTags: []string{ fmt.Sprintf("container_id:%s", otelEnvContainerID), fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), @@ -449,7 +451,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -460,8 +462,8 @@ func TestHandleKubePod(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: noEnvContainerTaggerEntityID, + Source: podSource, + EntityID: noEnvContainerTaggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_id:%s", noEnvContainerID), fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), @@ -494,7 +496,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -524,7 +526,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -556,7 +558,7 @@ func TestHandleKubePod(t *testing.T) { 
expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -589,7 +591,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -625,7 +627,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -659,7 +661,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -696,7 +698,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -730,7 +732,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -768,7 +770,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -799,7 +801,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: 
[]string{ fmt.Sprintf("pod_name:%s", podName), @@ -829,7 +831,7 @@ func TestHandleKubePod(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -845,7 +847,8 @@ func TestHandleKubePod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.staticTags = tt.staticTags collector.initK8sResourcesMetaAsTags(tt.k8sResourcesLabelsAsTags, tt.k8sResourcesAnnotationsAsTags) @@ -873,7 +876,7 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { ID: "foobar", } - podTaggerEntityID := fmt.Sprintf("kubernetes_pod_uid://%s", podEntityID.ID) + podTaggerEntityID := types.NewEntityID(types.KubernetesPodUID, podEntityID.ID) image := workloadmeta.ContainerImage{ ID: "datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", @@ -937,7 +940,7 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -949,8 +952,8 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: fmt.Sprintf("container_id://%s", noEnvContainerID), + Source: podSource, + EntityID: types.NewEntityID(types.ContainerID, noEnvContainerID), HighCardTags: []string{ fmt.Sprintf("container_id:%s", noEnvContainerID), fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), @@ -975,7 +978,9 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := 
NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + cfg.SetWithoutSource("kubernetes_persistent_volume_claims_as_tags", false) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.staticTags = tt.staticTags actual := collector.handleKubePod(workloadmeta.Event{ @@ -1012,8 +1017,8 @@ func TestHandleKubePodNoContainerName(t *testing.T) { ID: "foobar", } - podTaggerEntityID := fmt.Sprintf("kubernetes_pod_uid://%s", podEntityID.ID) - fullyFleshedContainerTaggerEntityID := fmt.Sprintf("container_id://%s", fullyFleshedContainerID) + podTaggerEntityID := types.NewEntityID(types.KubernetesPodUID, podEntityID.ID) + fullyFleshedContainerTaggerEntityID := types.NewEntityID(types.ContainerID, fullyFleshedContainerID) image := workloadmeta.ContainerImage{ ID: "datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", @@ -1084,7 +1089,7 @@ func TestHandleKubePodNoContainerName(t *testing.T) { expected: []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ fmt.Sprintf("pod_name:%s", podName), @@ -1095,8 +1100,8 @@ func TestHandleKubePodNoContainerName(t *testing.T) { StandardTags: []string{}, }, { - Source: podSource, - Entity: fullyFleshedContainerTaggerEntityID, + Source: podSource, + EntityID: fullyFleshedContainerTaggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_id:%s", fullyFleshedContainerID), fmt.Sprintf("display_container_name:%s_%s", containerName, podName), @@ -1120,7 +1125,8 @@ func TestHandleKubePodNoContainerName(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.staticTags = tt.staticTags actual := 
collector.handleKubePod(workloadmeta.Event{ @@ -1203,7 +1209,8 @@ func TestHandleKubeMetadata(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.initK8sResourcesMetaAsTags(test.k8sResourcesLabelsAsTags, test.k8sResourcesAnnotationsAsTags) @@ -1227,7 +1234,7 @@ func TestHandleKubeDeployment(t *testing.T) { ID: kubeMetadataID, } - taggerEntityID := fmt.Sprintf("kubernetes_metadata://%s", kubeMetadataEntityID.ID) + taggerEntityID := types.NewEntityID(types.KubernetesMetadata, kubeMetadataEntityID.ID) store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( fx.Provide(func() log.Component { return logmock.New(t) }), @@ -1326,7 +1333,7 @@ func TestHandleKubeDeployment(t *testing.T) { expected: []*types.TagInfo{ { Source: kubeMetadataSource, - Entity: taggerEntityID, + EntityID: taggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -1343,7 +1350,8 @@ func TestHandleKubeDeployment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.initK8sResourcesMetaAsTags(test.k8sResourcesLabelsAsTags, test.k8sResourcesAnnotationsAsTags) @@ -1368,7 +1376,7 @@ func TestHandleECSTask(t *testing.T) { ID: "foobar", } - taggerEntityID := fmt.Sprintf("container_id://%s", containerID) + taggerEntityID := types.NewEntityID(types.ContainerID, containerID) store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( fx.Provide(func() log.Component { return logmock.New(t) }), @@ -1422,7 +1430,7 @@ func TestHandleECSTask(t *testing.T) { expected: []*types.TagInfo{ { Source: taskSource, - Entity: taggerEntityID, + 
EntityID: taggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ "task_arn:foobar", @@ -1464,7 +1472,7 @@ func TestHandleECSTask(t *testing.T) { expected: []*types.TagInfo{ { Source: taskSource, - Entity: taggerEntityID, + EntityID: taggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ "task_arn:foobar", @@ -1484,7 +1492,7 @@ func TestHandleECSTask(t *testing.T) { }, { Source: taskSource, - Entity: GlobalEntityID, + EntityID: common.GetGlobalEntityID(), HighCardTags: []string{}, OrchestratorCardTags: []string{ "task_arn:foobar", @@ -1507,7 +1515,9 @@ func TestHandleECSTask(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + cfg.SetWithoutSource("ecs_collect_resource_tags_ec2", true) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) actual := collector.handleECSTask(workloadmeta.Event{ Type: workloadmeta.EventTypeSet, @@ -1541,7 +1551,7 @@ func TestHandleContainer(t *testing.T) { ID: "foobar", } - taggerEntityID := fmt.Sprintf("container_id://%s", entityID.ID) + taggerEntityID := types.NewEntityID(types.ContainerID, entityID.ID) tests := []struct { name string @@ -1574,8 +1584,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1619,8 +1629,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1657,8 +1667,8 @@ func TestHandleContainer(t *testing.T) { }, expected: 
[]*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1693,8 +1703,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1728,8 +1738,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1763,8 +1773,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1800,8 +1810,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1834,8 +1844,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1867,8 +1877,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: 
taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1901,8 +1911,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1945,8 +1955,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -1980,8 +1990,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -2009,8 +2019,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -2037,8 +2047,8 @@ func TestHandleContainer(t *testing.T) { }, expected: []*types.TagInfo{ { - Source: containerSource, - Entity: taggerEntityID, + Source: containerSource, + EntityID: taggerEntityID, HighCardTags: []string{ fmt.Sprintf("container_name:%s", containerName), fmt.Sprintf("container_id:%s", entityID.ID), @@ -2055,7 +2065,8 @@ func TestHandleContainer(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), nil, nil) + 
cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, nil, nil) collector.staticTags = tt.staticTags collector.initContainerMetaAsTags(tt.labelsAsTags, tt.envAsTags) @@ -2076,7 +2087,7 @@ func TestHandleContainerImage(t *testing.T) { ID: "sha256:651c55002cd5deb06bde7258f6ec6e0ff7f4f17a648ce6e2ec01917da9ae5104", } - taggerEntityID := fmt.Sprintf("container_image_metadata://%s", entityID.ID) + taggerEntityID := types.NewEntityID(types.ContainerImageMetadata, entityID.ID) tests := []struct { name string @@ -2114,7 +2125,7 @@ func TestHandleContainerImage(t *testing.T) { expected: []*types.TagInfo{ { Source: containerImageSource, - Entity: taggerEntityID, + EntityID: taggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2141,7 +2152,8 @@ func TestHandleContainerImage(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - collector := NewWorkloadMetaCollector(context.Background(), nil, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, nil, nil) actual := collector.handleContainerImage(workloadmeta.Event{ Type: workloadmeta.EventTypeSet, @@ -2179,8 +2191,8 @@ func TestHandleDelete(t *testing.T) { }, } - podTaggerEntityID := fmt.Sprintf("kubernetes_pod_uid://%s", podEntityID.ID) - containerTaggerEntityID := fmt.Sprintf("container_id://%s", containerID) + podTaggerEntityID := types.NewEntityID(types.KubernetesPodUID, podEntityID.ID) + containerTaggerEntityID := types.NewEntityID(types.ContainerID, containerID) store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( fx.Provide(func() log.Component { return logmock.New(t) }), @@ -2198,7 +2210,8 @@ func TestHandleDelete(t *testing.T) { }, }) - collector := NewWorkloadMetaCollector(context.Background(), store, nil) + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, store, nil) collector.handleKubePod(workloadmeta.Event{ 
Type: workloadmeta.EventTypeSet, @@ -2208,12 +2221,12 @@ func TestHandleDelete(t *testing.T) { expected := []*types.TagInfo{ { Source: podSource, - Entity: podTaggerEntityID, + EntityID: podTaggerEntityID, DeleteEntity: true, }, { Source: podSource, - Entity: containerTaggerEntityID, + EntityID: containerTaggerEntityID, DeleteEntity: true, }, } @@ -2240,7 +2253,7 @@ func TestHandlePodWithDeletedContainer(t *testing.T) { // exists even if it belonged to a pod that still exists. containerToBeDeletedID := "delete" - containerToBeDeletedTaggerEntityID := fmt.Sprintf("container_id://%s", containerToBeDeletedID) + containerToBeDeletedTaggerEntityID := types.NewEntityID(types.ContainerID, containerToBeDeletedID) pod := &workloadmeta.KubernetesPod{ EntityID: workloadmeta.EntityID{ @@ -2253,7 +2266,7 @@ func TestHandlePodWithDeletedContainer(t *testing.T) { }, Containers: []workloadmeta.OrchestratorContainer{}, } - podTaggerEntityID := fmt.Sprintf("kubernetes_pod_uid://%s", pod.ID) + podTaggerEntityID := types.NewEntityID(types.KubernetesPodUID, pod.ID) collectorCh := make(chan []*types.TagInfo, 10) @@ -2262,8 +2275,8 @@ func TestHandlePodWithDeletedContainer(t *testing.T) { config.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), )) - collector := NewWorkloadMetaCollector(context.Background(), fakeStore, &fakeProcessor{collectorCh}) - collector.children = map[string]map[string]struct{}{ + collector := NewWorkloadMetaCollector(context.Background(), configmock.New(t), fakeStore, &fakeProcessor{collectorCh}) + collector.children = map[types.EntityID]map[types.EntityID]struct{}{ // Notice that here we set the container that belonged to the pod // but that no longer exists podTaggerEntityID: {containerToBeDeletedTaggerEntityID: struct{}{}}} @@ -2283,7 +2296,7 @@ func TestHandlePodWithDeletedContainer(t *testing.T) { expected := &types.TagInfo{ Source: podSource, - Entity: containerToBeDeletedTaggerEntityID, + EntityID: containerToBeDeletedTaggerEntityID, 
DeleteEntity: true, } @@ -2293,7 +2306,7 @@ func TestHandlePodWithDeletedContainer(t *testing.T) { found := false for evBundle := range collectorCh { for _, event := range evBundle { - if cmp.Equal(event, expected) { + if reflect.DeepEqual(event, expected) { found = true break } diff --git a/comp/core/tagger/taggerimpl/generic_store/composite_store.go b/comp/core/tagger/taggerimpl/generic_store/composite_store.go new file mode 100644 index 0000000000000..13215d6fea371 --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/composite_store.go @@ -0,0 +1,86 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package genericstore + +import "github.com/DataDog/datadog-agent/comp/core/tagger/types" + +type compositeObjectStore[T any] struct { + data map[types.EntityIDPrefix]map[string]T + size int +} + +func newCompositeObjectStore[T any]() types.ObjectStore[T] { + return &compositeObjectStore[T]{ + data: make(map[types.EntityIDPrefix]map[string]T), + size: 0, + } +} + +// Get implements ObjectStore#Get +func (os *compositeObjectStore[T]) Get(entityID types.EntityID) (object T, found bool) { + submap, found := os.data[entityID.GetPrefix()] + if !found { + return + } + + object, found = submap[entityID.GetID()] + return +} + +// Set implements ObjectStore#Set +func (os *compositeObjectStore[T]) Set(entityID types.EntityID, object T) { + prefix := entityID.GetPrefix() + id := entityID.GetID() + if submap, found := os.data[prefix]; found { + if _, exists := submap[id]; !exists { + os.size++ + } + submap[id] = object + } else { + os.data[prefix] = map[string]T{id: object} + os.size++ + } +} + +// Unset implements ObjectStore#Unset +func (os *compositeObjectStore[T]) Unset(entityID types.EntityID) { + prefix := entityID.GetPrefix() + id := entityID.GetID() + // TODO: 
prune + if submap, found := os.data[prefix]; found { + if _, exists := submap[id]; exists { + delete(submap, id) + os.size-- + } + } +} + +// Size implements ObjectStore#Size +func (os *compositeObjectStore[T]) Size() int { + return os.size +} + +// ListObjects implements ObjectStore#ListObjects +func (os *compositeObjectStore[T]) ListObjects() []T { + objects := make([]T, 0, os.Size()) + + for _, idToObjects := range os.data { + for _, object := range idToObjects { + objects = append(objects, object) + } + } + + return objects +} + +// ForEach implements ObjectStore#ForEach +func (os *compositeObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { + for prefix, idToObjects := range os.data { + for id, object := range idToObjects { + apply(types.NewEntityID(prefix, id), object) + } + } +} diff --git a/comp/core/tagger/taggerimpl/generic_store/default_store.go b/comp/core/tagger/taggerimpl/generic_store/default_store.go new file mode 100644 index 0000000000000..d0112b8c0c8eb --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/default_store.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package genericstore + +import "github.com/DataDog/datadog-agent/comp/core/tagger/types" + +type defaulObjectStore[T any] map[types.EntityID]T + +func newDefaultObjectStore[T any]() types.ObjectStore[T] { + return make(defaulObjectStore[T]) +} + +// Get implements ObjectStore#Get +func (os defaulObjectStore[T]) Get(entityID types.EntityID) (object T, found bool) { + obj, found := os[entityID] + return obj, found +} + +// Set implements ObjectStore#Set +func (os defaulObjectStore[T]) Set(entityID types.EntityID, object T) { + os[entityID] = object +} + +// Unset implements ObjectStore#Unset +func (os defaulObjectStore[T]) Unset(entityID types.EntityID) { + delete(os, entityID) +} + +// Size implements ObjectStore#Size +func (os defaulObjectStore[T]) Size() int { + return len(os) +} + +// ListObjects implements ObjectStore#ListObjects +func (os defaulObjectStore[T]) ListObjects() []T { + objects := make([]T, 0) + + for _, object := range os { + objects = append(objects, object) + } + + return objects +} + +// ForEach implements ObjectStore#ForEach +func (os defaulObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { + for id, object := range os { + apply(id, object) + } +} diff --git a/comp/core/tagger/taggerimpl/generic_store/doc.go b/comp/core/tagger/taggerimpl/generic_store/doc.go new file mode 100644 index 0000000000000..61e295604cf9b --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/doc.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package genericstore defines a generic object store that satisfies a redundant use-case in the tagger component implementation. +// The implementation of the tagger component requires storing objects indexed by keys. +// Keys are in the form of `{prefix}://{id}`. 
+// +// The package provides a generic interface ObjectStore which can store objects of a given type and index by tagger EntityID (i.e. a prefix + an id). +// It also provides 2 implementations of this interface: +// - defaultObjectStore: implements the object store as a plain from entity id to entity object. It is intended to be used when EntityID is stored as a string. +// - compositeObjectStore: implements the object store as a 2-layered map. The first map is indexed by prefix, and the second map is indexed by id. It is intended to be used when EntityID is stored +// as a struct separating prefix and id into 2 fields. This implementation is optimised for quick lookups, listing and filtering by prefix. +package genericstore diff --git a/comp/core/tagger/taggerimpl/generic_store/store.go b/comp/core/tagger/taggerimpl/generic_store/store.go new file mode 100644 index 0000000000000..66279a627bf47 --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/store.go @@ -0,0 +1,20 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package genericstore + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" +) + +// NewObjectStore constructs and returns a an ObjectStore +func NewObjectStore[T any](cfg config.Component) types.ObjectStore[T] { + // TODO: use composite object store always or use component framework for config component + if cfg.GetBool("tagger.tagstore_use_composite_entity_id") { + return newCompositeObjectStore[T]() + } + return newDefaultObjectStore[T]() +} diff --git a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go new file mode 100644 index 0000000000000..13715c69de459 --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go @@ -0,0 +1,209 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package genericstore + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" +) + +const samples int = 1000000 + +var weightedPrefixes = map[string]int{ + "container_image_metadata": 60, + "container_id": 60, + "ecs_task": 5, + "host": 5, + "deployment": 15, + "kubernetes_metadata": 30, + "kubernetes_pod_uid": 30, + "process": 30, +} + +// getWeightedPrefix selects a prefix based on the provided weights. 
+func getNextPrefix() types.EntityIDPrefix { + totalWeight := 0 + for _, weight := range weightedPrefixes { + totalWeight += weight + } + + randomWeight := rand.Intn(totalWeight) + + // Iterate through the prefixes and select one based on the random weight + cumulativeWeight := 0 + for prefix, weight := range weightedPrefixes { + cumulativeWeight += weight + if randomWeight < cumulativeWeight { + return types.EntityIDPrefix(prefix) + } + } + + return "" // This line should never be reached if the weights are set up correctly +} + +func initStore(store types.ObjectStore[int]) { + for i := range samples { + entityID := types.NewEntityID(getNextPrefix(), fmt.Sprintf("%d", i)) + store.Set(entityID, i) + } +} + +// Mock ApplyFunc for testing purposes +func mockApplyFunc[T any](_ types.EntityID, _ T) {} + +func BenchmarkDefaultObjectStore_Set(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + initStore(store) + } +} + +func BenchmarkCompositeObjectStore_Set(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + store := NewObjectStore[int](cfg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + initStore(store) + } +} + +func BenchmarkDefaultObjectStore_Get(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entityID := types.NewEntityID(getNextPrefix(), fmt.Sprintf("%d", i)) + _, _ = store.Get(entityID) + } +} + +func BenchmarkCompositeObjectStore_Get(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entityID := 
types.NewEntityID(getNextPrefix(), fmt.Sprintf("%d", i)) + _, _ = store.Get(entityID) + } +} + +func BenchmarkDefaultObjectStore_Unset(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entityID := types.NewEntityID(getNextPrefix(), fmt.Sprintf("%d", i)) + store.Unset(entityID) + store.Set(entityID, i) // reset the state for the next iteration + } +} + +func BenchmarkCompositeObjectStore_Unset(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entityID := types.NewEntityID(getNextPrefix(), fmt.Sprintf("%d", i)) + store.Unset(entityID) + store.Set(entityID, i) // reset the state for the next iteration + } +} + +func BenchmarkDefaultObjectStore_Size(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = store.Size() + } +} + +func BenchmarkCompositeObjectStore_Size(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = store.Size() + } +} + +func BenchmarkDefaultObjectStore_ForEach(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + store.ForEach(mockApplyFunc[int]) + } +} + +func BenchmarkCompositeObjectStore_ForEach(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + 
store := NewObjectStore[int](cfg) + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + store.ForEach(mockApplyFunc[int]) + } +} + +func BenchmarkDefaultObjectStore_ListAll(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) + store := NewObjectStore[int](cfg) + + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = store.ListObjects() + } +} + +func BenchmarkCompositeObjectStore_ListAll(b *testing.B) { + cfg := configmock.New(b) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) + store := NewObjectStore[int](cfg) + + initStore(store) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = store.ListObjects() + } +} diff --git a/comp/core/tagger/taggerimpl/generic_store/store_test.go b/comp/core/tagger/taggerimpl/generic_store/store_test.go new file mode 100644 index 0000000000000..0a72a87fecb2a --- /dev/null +++ b/comp/core/tagger/taggerimpl/generic_store/store_test.go @@ -0,0 +1,166 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package genericstore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" +) + +func TestNewObjectStore(t *testing.T) { + test := func(t *testing.T, isComposite bool) { + cfg := configmock.New(t) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) + + store := NewObjectStore[any](cfg) + + // assert correct store type is initialised + if isComposite { + _, ok := store.(*compositeObjectStore[any]) + assert.Truef(t, ok, "Should have initialised a composite object store") + } else { + _, ok := store.(defaulObjectStore[any]) + assert.Truef(t, ok, "Should have initialised a default object store") + } + } + + // default store + test(t, false) + + // composite composite + test(t, true) +} + +func TestObjectStore_GetSet(t *testing.T) { + test := func(t *testing.T, isComposite bool) { + cfg := configmock.New(t) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) + + store := NewObjectStore[any](cfg) + + id := types.NewEntityID("prefix", "id") + // getting a non existent item + obj, found := store.Get(id) + assert.Nil(t, obj) + assert.Falsef(t, found, "item should not be found in store") + + // set item + store.Set(id, struct{}{}) + + // getting item + obj, found = store.Get(id) + assert.NotNil(t, obj) + assert.Truef(t, found, "item should be found in store") + + // unsetting item + store.Unset(id) + + // getting a non existent item + obj, found = store.Get(id) + assert.Nil(t, obj) + assert.Falsef(t, found, "item should not be found in store") + } + + // default store + test(t, false) + + // composite store + test(t, true) +} + +func TestObjectStore_Size(t *testing.T) { + + test := func(t *testing.T, isComposite bool) { + // initialise store + cfg := configmock.New(t) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) + store := NewObjectStore[any](cfg) + + 
// store should be empty
+		assert.Equalf(t, store.Size(), 0, "store should be empty")
+
+		// add item to store
+		id := types.NewEntityID("prefix", "id")
+		store.Set(id, struct{}{})
+
+		// store size should be 1
+		assert.Equalf(t, 1, store.Size(), "store should contain 1 item")
+
+		// unset item
+		store.Unset(id)
+
+		// store should be empty
+		assert.Equalf(t, 0, store.Size(), "store should be empty")
+	}
+
+	// default store
+	test(t, false)
+
+	// composite store
+	test(t, true)
+}
+
+func TestObjectStore_ListObjects(t *testing.T) {
+	test := func(t *testing.T, isComposite bool) {
+		// initialise store
+		cfg := configmock.New(t)
+		cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite)
+		store := NewObjectStore[any](cfg)
+
+		// list should return empty
+		list := store.ListObjects()
+		assert.Equalf(t, len(list), 0, "ListObjects should return an empty list")
+
+		// add some items
+		ids := []string{"prefix1://id1", "prefix2://id2", "prefix3://id3", "prefix4://id4"}
+		for _, id := range ids {
+			entityID, _ := types.NewEntityIDFromString(id)
+			store.Set(entityID, struct{}{})
+		}
+
+		// list should now contain all added items
+		list = store.ListObjects()
+		assert.Equalf(t, len(list), len(ids), "ListObjects should return a list of size %d", len(ids))
+	}
+
+	// default store
+	test(t, false)
+
+	// composite store
+	test(t, true)
+}
+
+func TestObjectStore_ForEach(t *testing.T) {
+	test := func(t *testing.T, isComposite bool) {
+		// initialise store
+		cfg := configmock.New(t)
+		cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite)
+		store := NewObjectStore[any](cfg)
+
+		// add some items
+		ids := []string{"prefix1://id1", "prefix2://id2", "prefix3://id3", "prefix4://id4"}
+		for _, id := range ids {
+			entityID, _ := types.NewEntityIDFromString(id)
+			store.Set(entityID, struct{}{})
+		}
+
+		accumulator := []string{}
+		store.ForEach(func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) })
+
+		// accumulator should contain exactly the ids that were added
+ assert.ElementsMatch(t, accumulator, ids) + } + + // default store + test(t, false) + + // composite store + test(t, true) +} diff --git a/comp/core/tagger/taggerimpl/local/fake_tagger.go b/comp/core/tagger/taggerimpl/local/fake_tagger.go index 968189ed1b5aa..7882d8ee26389 100644 --- a/comp/core/tagger/taggerimpl/local/fake_tagger.go +++ b/comp/core/tagger/taggerimpl/local/fake_tagger.go @@ -10,8 +10,9 @@ import ( "strconv" "sync" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" @@ -29,10 +30,10 @@ type FakeTagger struct { } // NewFakeTagger returns a new fake Tagger -func NewFakeTagger(telemetryStore *telemetry.Store) *FakeTagger { +func NewFakeTagger(cfg config.Component, telemetryStore *telemetry.Store) *FakeTagger { return &FakeTagger{ errors: make(map[string]error), - store: tagstore.NewTagStore(telemetryStore), + store: tagstore.NewTagStore(cfg, telemetryStore), telemetryStore: telemetryStore, } } @@ -40,11 +41,12 @@ func NewFakeTagger(telemetryStore *telemetry.Store) *FakeTagger { // FakeTagger specific interface // SetTags allows to set tags in store for a given source, entity -func (f *FakeTagger) SetTags(entity, source string, low, orch, high, std []string) { +func (f *FakeTagger) SetTags(entityID string, source string, low, orch, high, std []string) { + id, _ := types.NewEntityIDFromString(entityID) f.store.ProcessTagInfo([]*types.TagInfo{ { Source: source, - Entity: entity, + EntityID: id, LowCardTags: low, OrchestratorCardTags: orch, HighCardTags: high, @@ -55,7 +57,7 @@ func (f *FakeTagger) SetTags(entity, source string, low, orch, high, std []strin // 
SetGlobalTags allows to set tags in store for the global entity func (f *FakeTagger) SetGlobalTags(low, orch, high, std []string) { - f.SetTags(collectors.GlobalEntityID, "static", low, orch, high, std) + f.SetTags(common.GetGlobalEntityID().String(), "static", low, orch, high, std) } // SetTagsFromInfo allows to set tags from list of TagInfo @@ -65,11 +67,11 @@ func (f *FakeTagger) SetTagsFromInfo(tags []*types.TagInfo) { // SetError allows to set an error to be returned when `Tag` or `AccumulateTagsFor` is called // for this entity and cardinality -func (f *FakeTagger) SetError(entity string, cardinality types.TagCardinality, err error) { +func (f *FakeTagger) SetError(entityID types.EntityID, cardinality types.TagCardinality, err error) { f.Lock() defer f.Unlock() - f.errors[f.getKey(entity, cardinality)] = err + f.errors[f.getKey(entityID, cardinality)] = err } // Tagger interface @@ -96,10 +98,11 @@ func (f *FakeTagger) GetTaggerTelemetryStore() *telemetry.Store { } // Tag fake implementation -func (f *FakeTagger) Tag(entity string, cardinality types.TagCardinality) ([]string, error) { - tags := f.store.Lookup(entity, cardinality) +func (f *FakeTagger) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { + id, _ := types.NewEntityIDFromString(entityID) + tags := f.store.Lookup(id, cardinality) - key := f.getKey(entity, cardinality) + key := f.getKey(id, cardinality) if err := f.errors[key]; err != nil { return nil, err } @@ -109,12 +112,12 @@ func (f *FakeTagger) Tag(entity string, cardinality types.TagCardinality) ([]str // GlobalTags fake implementation func (f *FakeTagger) GlobalTags(cardinality types.TagCardinality) ([]string, error) { - return f.Tag(collectors.GlobalEntityID, cardinality) + return f.Tag(common.GetGlobalEntityID().String(), cardinality) } // AccumulateTagsFor fake implementation -func (f *FakeTagger) AccumulateTagsFor(entity string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { - tags, err := 
f.Tag(entity, cardinality) +func (f *FakeTagger) AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { + tags, err := f.Tag(entityID, cardinality) if err != nil { return err } @@ -124,13 +127,21 @@ func (f *FakeTagger) AccumulateTagsFor(entity string, cardinality types.TagCardi } // Standard fake implementation -func (f *FakeTagger) Standard(entity string) ([]string, error) { - return f.store.LookupStandard(entity) +func (f *FakeTagger) Standard(entityID string) ([]string, error) { + id, err := types.NewEntityIDFromString(entityID) + if err != nil { + return nil, err + } + return f.store.LookupStandard(id) } // GetEntity returns faked entity corresponding to the specified id and an error func (f *FakeTagger) GetEntity(entityID string) (*types.Entity, error) { - return f.store.GetEntity(entityID) + id, err := types.NewEntityIDFromString(entityID) + if err != nil { + return nil, err + } + return f.store.GetEntity(id) } // List fake implementation @@ -149,6 +160,6 @@ func (f *FakeTagger) Unsubscribe(ch chan []types.EntityEvent) { } // Fake internals -func (f *FakeTagger) getKey(entity string, cardinality types.TagCardinality) string { - return entity + strconv.FormatInt(int64(cardinality), 10) +func (f *FakeTagger) getKey(entity types.EntityID, cardinality types.TagCardinality) string { + return entity.String() + strconv.FormatInt(int64(cardinality), 10) } diff --git a/comp/core/tagger/taggerimpl/local/tagger.go b/comp/core/tagger/taggerimpl/local/tagger.go index 011e2a0dbeb9f..5a0d1dbf31afd 100644 --- a/comp/core/tagger/taggerimpl/local/tagger.go +++ b/comp/core/tagger/taggerimpl/local/tagger.go @@ -11,6 +11,7 @@ import ( "fmt" "sync" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty" @@ -30,6 +31,7 @@ type Tagger struct { 
tagStore *tagstore.TagStore workloadStore workloadmeta.Component + cfg config.Component collector *collectors.WorkloadMetaCollector ctx context.Context @@ -40,11 +42,12 @@ type Tagger struct { // NewTagger returns an allocated tagger. You are probably looking for // tagger.Tag() using the global instance instead of creating your own. -func NewTagger(workloadStore workloadmeta.Component, telemetryStore *telemetry.Store) *Tagger { +func NewTagger(cfg config.Component, workloadStore workloadmeta.Component, telemetryStore *telemetry.Store) *Tagger { return &Tagger{ - tagStore: tagstore.NewTagStore(telemetryStore), + tagStore: tagstore.NewTagStore(cfg, telemetryStore), workloadStore: workloadStore, telemetryStore: telemetryStore, + cfg: cfg, } } @@ -54,6 +57,7 @@ func (t *Tagger) Start(ctx context.Context) error { t.collector = collectors.NewWorkloadMetaCollector( t.ctx, + t.cfg, t.workloadStore, t.tagStore, ) @@ -71,28 +75,30 @@ func (t *Tagger) Stop() error { } // getTags returns a read only list of tags for a given entity. 
-func (t *Tagger) getTags(entity string, cardinality types.TagCardinality) (tagset.HashedTags, error) { - if entity == "" { +func (t *Tagger) getTags(entityID types.EntityID, cardinality types.TagCardinality) (tagset.HashedTags, error) { + if entityID.GetID() == "" { t.telemetryStore.QueriesByCardinality(cardinality).EmptyEntityID.Inc() return tagset.HashedTags{}, fmt.Errorf("empty entity ID") } - cachedTags := t.tagStore.LookupHashed(entity, cardinality) + cachedTags := t.tagStore.LookupHashed(entityID, cardinality) t.telemetryStore.QueriesByCardinality(cardinality).Success.Inc() return cachedTags, nil } // AccumulateTagsFor appends tags for a given entity from the tagger to the TagsAccumulator -func (t *Tagger) AccumulateTagsFor(entity string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { - tags, err := t.getTags(entity, cardinality) +func (t *Tagger) AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { + id, _ := types.NewEntityIDFromString(entityID) + tags, err := t.getTags(id, cardinality) tb.AppendHashed(tags) return err } // Tag returns a copy of the tags for a given entity -func (t *Tagger) Tag(entity string, cardinality types.TagCardinality) ([]string, error) { - tags, err := t.getTags(entity, cardinality) +func (t *Tagger) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { + id, _ := types.NewEntityIDFromString(entityID) + tags, err := t.getTags(id, cardinality) if err != nil { return nil, err } @@ -101,17 +107,19 @@ func (t *Tagger) Tag(entity string, cardinality types.TagCardinality) ([]string, // Standard returns standard tags for a given entity // It triggers a tagger fetch if the no tags are found -func (t *Tagger) Standard(entity string) ([]string, error) { - if entity == "" { +func (t *Tagger) Standard(entityID string) ([]string, error) { + if entityID == "" { return nil, fmt.Errorf("empty entity ID") } - return t.tagStore.LookupStandard(entity) + id, 
_ := types.NewEntityIDFromString(entityID) + return t.tagStore.LookupStandard(id) } // GetEntity returns the entity corresponding to the specified id and an error func (t *Tagger) GetEntity(entityID string) (*types.Entity, error) { - return t.tagStore.GetEntity(entityID) + id, _ := types.NewEntityIDFromString(entityID) + return t.tagStore.GetEntity(id) } // List the content of the tagger diff --git a/comp/core/tagger/taggerimpl/local/tagger_test.go b/comp/core/tagger/taggerimpl/local/tagger_test.go index 1e03371474607..f45e470f7701e 100644 --- a/comp/core/tagger/taggerimpl/local/tagger_test.go +++ b/comp/core/tagger/taggerimpl/local/tagger_test.go @@ -22,12 +22,15 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) func TestTagBuilder(t *testing.T) { + entityID := types.NewEntityID("", "entity_name") + store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( fx.Supply(config.Params{}), fx.Supply(log.Params{}), @@ -38,26 +41,27 @@ func TestTagBuilder(t *testing.T) { tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) - tagger := NewTagger(store, telemetryStore) + cfg := configmock.New(t) + tagger := NewTagger(cfg, store, telemetryStore) tagger.Start(context.Background()) defer tagger.Stop() tagger.tagStore.ProcessTagInfo([]*types.TagInfo{ { - Entity: "entity_name", + EntityID: entityID, Source: "stream", LowCardTags: []string{"low1"}, HighCardTags: []string{"high"}, }, { - Entity: "entity_name", + EntityID: entityID, Source: "pull", LowCardTags: []string{"low2"}, }, }) tb := tagset.NewHashlessTagsAccumulator() - err := 
tagger.AccumulateTagsFor("entity_name", types.HighCardinality, tb) + err := tagger.AccumulateTagsFor(entityID.String(), types.HighCardinality, tb) assert.NoError(t, err) assert.ElementsMatch(t, []string{"high", "low1", "low2"}, tb.Get()) } diff --git a/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go b/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go index f5167e86f38ac..913bd7c2ee761 100644 --- a/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go +++ b/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -48,7 +49,7 @@ func init() { func BenchmarkTagStoreThroughput(b *testing.B) { tel := fxutil.Test[telemetry.Component](b, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) - store := tagstore.NewTagStore(telemetryStore) + store := tagstore.NewTagStore(configmock.New(b), telemetryStore) doneCh := make(chan struct{}) pruneTicker := time.NewTicker(time.Second) @@ -75,7 +76,7 @@ func BenchmarkTagStoreThroughput(b *testing.B) { go func() { for i := 0; i < 1000; i++ { - id := ids[rand.Intn(nEntities)] + id := types.NewEntityID("", ids[rand.Intn(nEntities)]) store.Lookup(id, types.HighCardinality) } wg.Done() @@ -94,7 +95,8 @@ func BenchmarkTagStoreThroughput(b *testing.B) { func BenchmarkTagStore_processTagInfo(b *testing.B) { tel := fxutil.Test[telemetry.Component](b, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) - store := tagstore.NewTagStore(telemetryStore) + + store := tagstore.NewTagStore(configmock.New(b), telemetryStore) for i := 0; i < b.N; i++ { processRandomTagInfoBatch(store) @@ -105,7 +107,7 @@ func generateRandomTagInfo() *types.TagInfo { id := 
ids[rand.Intn(nEntities)] source := sources[rand.Intn(nSources)] return &types.TagInfo{ - Entity: id, + EntityID: types.NewEntityID("", id), Source: source, LowCardTags: generateRandomTags(), OrchestratorCardTags: generateRandomTags(), diff --git a/comp/core/tagger/taggerimpl/remote/tagger.go b/comp/core/tagger/taggerimpl/remote/tagger.go index dfb485fae6135..e0a7d7ec01f17 100644 --- a/comp/core/tagger/taggerimpl/remote/tagger.go +++ b/comp/core/tagger/taggerimpl/remote/tagger.go @@ -21,13 +21,12 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" - configComponent "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" @@ -49,6 +48,8 @@ type Tagger struct { ready bool options Options + cfg config.Component + conn *grpc.ClientConn client pb.AgentSecureClient stream pb.AgentSecure_TaggerStreamEntitiesClient @@ -72,7 +73,7 @@ type Options struct { } // NodeAgentOptions returns the tagger options used in the node agent. 
-func NodeAgentOptions(config configComponent.Component) (Options, error) { +func NodeAgentOptions(config config.Component) (Options, error) { return Options{ Target: fmt.Sprintf(":%v", config.GetInt("cmd_port")), TokenFetcher: func() (string, error) { return security.FetchAuthToken(config) }, @@ -82,15 +83,15 @@ func NodeAgentOptions(config configComponent.Component) (Options, error) { // NodeAgentOptionsForSecurityResolvers is a legacy function that returns the // same options as NodeAgentOptions, but it's used by the tag security resolvers only // TODO (component): remove this function once the security resolver migrates to component -func NodeAgentOptionsForSecurityResolvers() (Options, error) { +func NodeAgentOptionsForSecurityResolvers(cfg config.Component) (Options, error) { return Options{ - Target: fmt.Sprintf(":%v", config.Datadog().GetInt("cmd_port")), - TokenFetcher: func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + Target: fmt.Sprintf(":%v", cfg.GetInt("cmd_port")), + TokenFetcher: func() (string, error) { return security.FetchAuthToken(cfg) }, }, nil } // CLCRunnerOptions returns the tagger options used in the CLC Runner. -func CLCRunnerOptions(config configComponent.Component) (Options, error) { +func CLCRunnerOptions(config config.Component) (Options, error) { opts := Options{ Disabled: !config.GetBool("clc_runner_remote_tagger_enabled"), } @@ -111,10 +112,11 @@ func CLCRunnerOptions(config configComponent.Component) (Options, error) { // NewTagger returns an allocated tagger. You still have to run Init() // once the config package is ready. 
-func NewTagger(options Options, telemetryStore *telemetry.Store) *Tagger { +func NewTagger(options Options, cfg config.Component, telemetryStore *telemetry.Store) *Tagger { return &Tagger{ options: options, - store: newTagStore(telemetryStore), + cfg: cfg, + store: newTagStore(cfg, telemetryStore), telemetryStore: telemetryStore, } } @@ -150,7 +152,7 @@ func (t *Tagger) Start(ctx context.Context) error { t.client = pb.NewAgentSecureClient(t.conn) - timeout := time.Duration(config.Datadog().GetInt("remote_tagger_timeout_seconds")) * time.Second + timeout := time.Duration(t.cfg.GetInt("remote_tagger_timeout_seconds")) * time.Second err = t.startTaggerStream(timeout) if err != nil { // tagger stopped before being connected @@ -196,7 +198,8 @@ func (t *Tagger) GetTaggerTelemetryStore() *telemetry.Store { // Tag returns tags for a given entity at the desired cardinality. func (t *Tagger) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { - entity := t.store.getEntity(entityID) + id, _ := types.NewEntityIDFromString(entityID) + entity := t.store.getEntity(id) if entity != nil { t.telemetryStore.QueriesByCardinality(cardinality).Success.Inc() return entity.GetTags(cardinality), nil @@ -219,7 +222,8 @@ func (t *Tagger) AccumulateTagsFor(entityID string, cardinality types.TagCardina // Standard returns the standard tags for a given entity. 
func (t *Tagger) Standard(entityID string) ([]string, error) { - entity := t.store.getEntity(entityID) + id, _ := types.NewEntityIDFromString(entityID) + entity := t.store.getEntity(id) if entity == nil { return []string{}, nil } @@ -229,7 +233,8 @@ func (t *Tagger) Standard(entityID string) ([]string, error) { // GetEntity returns the entity corresponding to the specified id and an error func (t *Tagger) GetEntity(entityID string) (*types.Entity, error) { - entity := t.store.getEntity(entityID) + id, _ := types.NewEntityIDFromString(entityID) + entity := t.store.getEntity(id) if entity == nil { return nil, fmt.Errorf("Entity not found for entityID") } @@ -245,7 +250,7 @@ func (t *Tagger) List() types.TaggerListResponse { } for _, e := range entities { - resp.Entities[e.ID] = types.TaggerListEntity{ + resp.Entities[e.ID.String()] = types.TaggerListEntity{ Tags: map[string][]string{ remoteSource: e.GetTags(types.HighCardinality), }, @@ -339,7 +344,7 @@ func (t *Tagger) processResponse(response *pb.StreamTagsResponse) error { events = append(events, types.EntityEvent{ EventType: eventType, Entity: types.Entity{ - ID: convertEntityID(entity.Id), + ID: types.NewEntityID(types.EntityIDPrefix(entity.Id.Prefix), entity.Id.Uid), HighCardinalityTags: entity.HighCardinalityTags, OrchestratorCardinalityTags: entity.OrchestratorCardinalityTags, LowCardinalityTags: entity.LowCardinalityTags, @@ -419,10 +424,6 @@ func convertEventType(t pb.EventType) (types.EventType, error) { return types.EventTypeAdded, fmt.Errorf("unknown event type: %q", t) } -func convertEntityID(id *pb.EntityId) string { - return fmt.Sprintf("%s://%s", id.Prefix, id.Uid) -} - // TODO(components): verify the grpclog is initialized elsewhere and cleanup func init() { grpclog.SetLoggerV2(grpcutil.NewLogger()) diff --git a/comp/core/tagger/taggerimpl/remote/tagstore.go b/comp/core/tagger/taggerimpl/remote/tagstore.go index 56667d81b9509..5f3d12de8eac4 100644 --- a/comp/core/tagger/taggerimpl/remote/tagstore.go 
+++ b/comp/core/tagger/taggerimpl/remote/tagstore.go @@ -8,27 +8,30 @@ package remote import ( "sync" + "github.com/DataDog/datadog-agent/comp/core/config" + genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/generic_store" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/subscriber" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/pkg/util/containers" ) const remoteSource = "remote" type tagStore struct { mutex sync.RWMutex - store map[string]*types.Entity + store types.ObjectStore[*types.Entity] telemetry map[string]float64 + cfg config.Component subscriber *subscriber.Subscriber telemetryStore *telemetry.Store } -func newTagStore(telemetryStore *telemetry.Store) *tagStore { +func newTagStore(cfg config.Component, telemetryStore *telemetry.Store) *tagStore { return &tagStore{ - store: make(map[string]*types.Entity), + store: genericstore.NewObjectStore[*types.Entity](cfg), telemetry: make(map[string]float64), + cfg: cfg, subscriber: subscriber.NewSubscriber(telemetryStore), telemetryStore: telemetryStore, } @@ -48,14 +51,14 @@ func (s *tagStore) processEvents(events []types.EntityEvent, replace bool) error switch event.EventType { case types.EventTypeAdded: s.telemetryStore.UpdatedEntities.Inc() - s.store[event.Entity.ID] = &entity + s.store.Set(event.Entity.ID, &entity) case types.EventTypeModified: s.telemetryStore.UpdatedEntities.Inc() - s.store[event.Entity.ID] = &entity + s.store.Set(event.Entity.ID, &entity) case types.EventTypeDeleted: - delete(s.store, event.Entity.ID) + s.store.Unset(event.Entity.ID) } } @@ -64,23 +67,21 @@ func (s *tagStore) processEvents(events []types.EntityEvent, replace bool) error return nil } -func (s *tagStore) getEntity(entityID string) *types.Entity { +func (s *tagStore) getEntity(entityID types.EntityID) *types.Entity { s.mutex.RLock() defer s.mutex.RUnlock() - return s.store[entityID] 
+ if entity, present := s.store.Get(entityID); present { + return entity + } + + return nil } func (s *tagStore) listEntities() []*types.Entity { s.mutex.RLock() defer s.mutex.RUnlock() - entities := make([]*types.Entity, 0, len(s.store)) - - for _, e := range s.store { - entities = append(entities, e) - } - - return entities + return s.store.ListObjects() } func (s *tagStore) collectTelemetry() { @@ -92,10 +93,7 @@ func (s *tagStore) collectTelemetry() { s.mutex.Lock() defer s.mutex.Unlock() - for _, entity := range s.store { - prefix, _ := containers.SplitEntityName(entity.ID) - s.telemetry[prefix]++ - } + s.store.ForEach(func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ }) for prefix, storedEntities := range s.telemetry { s.telemetryStore.StoredEntities.Set(storedEntities, remoteSource, prefix) @@ -107,14 +105,14 @@ func (s *tagStore) subscribe(cardinality types.TagCardinality) chan []types.Enti s.mutex.RLock() defer s.mutex.RUnlock() - events := make([]types.EntityEvent, 0, len(s.store)) + events := make([]types.EntityEvent, 0, s.store.Size()) - for _, e := range s.store { + s.store.ForEach(func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, Entity: *e, }) - } + }) return s.subscriber.Subscribe(cardinality, events) } @@ -134,20 +132,20 @@ func (s *tagStore) notifySubscribers(events []types.EntityEvent) { // NOTE: caller must ensure that it holds s.mutex's lock, as this func does not // do it on its own. 
func (s *tagStore) reset() { - if len(s.store) == 0 { + if s.store.Size() == 0 { return } - events := make([]types.EntityEvent, 0, len(s.store)) + events := make([]types.EntityEvent, 0, s.store.Size()) - for _, e := range s.store { + s.store.ForEach(func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeDeleted, Entity: types.Entity{ID: e.ID}, }) - } + }) s.notifySubscribers(events) - s.store = make(map[string]*types.Entity) + s.store = genericstore.NewObjectStore[*types.Entity](s.cfg) } diff --git a/comp/core/tagger/taggerimpl/remote/tagstore_test.go b/comp/core/tagger/taggerimpl/remote/tagstore_test.go index 84c71239a8a53..c00abbde5a5a0 100644 --- a/comp/core/tagger/taggerimpl/remote/tagstore_test.go +++ b/comp/core/tagger/taggerimpl/remote/tagstore_test.go @@ -14,12 +14,13 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -const ( - entityID = "foo://bar" - anotherEntityID = "foo://quux" +var ( + entityID types.EntityID = types.NewEntityID("foo", "bar") + anotherEntityID types.EntityID = types.NewEntityID("foo", "quux") ) func TestProcessEvent_AddAndModify(t *testing.T) { @@ -49,7 +50,8 @@ func TestProcessEvent_AddAndModify(t *testing.T) { } tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) - store := newTagStore(telemetryStore) + cfg := configmock.New(t) + store := newTagStore(cfg, telemetryStore) store.processEvents(events, false) entity := store.getEntity(entityID) @@ -86,7 +88,8 @@ func TestProcessEvent_AddAndDelete(t *testing.T) { tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) - store := 
newTagStore(telemetryStore) + cfg := configmock.New(t) + store := newTagStore(cfg, telemetryStore) store.processEvents(events, false) entity := store.getEntity(entityID) @@ -100,8 +103,9 @@ func TestProcessEvent_AddAndDelete(t *testing.T) { func TestProcessEvent_Replace(t *testing.T) { tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + cfg := configmock.New(t) telemetryStore := taggerTelemetry.NewStore(tel) - store := newTagStore(telemetryStore) + store := newTagStore(cfg, telemetryStore) store.processEvents([]types.EntityEvent{ { diff --git a/comp/core/tagger/taggerimpl/replay/tagger.go b/comp/core/tagger/taggerimpl/replay/tagger.go index 899ff90d7d794..5c49d195ab0a6 100644 --- a/comp/core/tagger/taggerimpl/replay/tagger.go +++ b/comp/core/tagger/taggerimpl/replay/tagger.go @@ -10,6 +10,7 @@ import ( "context" "time" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore" @@ -33,9 +34,9 @@ type Tagger struct { // NewTagger returns an allocated tagger. You still have to run Init() // once the config package is ready. -func NewTagger(telemetryStore *telemetry.Store) *Tagger { +func NewTagger(cfg config.Component, telemetryStore *telemetry.Store) *Tagger { return &Tagger{ - store: tagstore.NewTagStore(telemetryStore), + store: tagstore.NewTagStore(cfg, telemetryStore), telemetryStore: telemetryStore, } } @@ -63,13 +64,15 @@ func (t *Tagger) Stop() error { // Tag returns tags for a given entity at the desired cardinality. func (t *Tagger) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { - tags := t.store.Lookup(entityID, cardinality) + id, _ := types.NewEntityIDFromString(entityID) + tags := t.store.Lookup(id, cardinality) return tags, nil } // AccumulateTagsFor returns tags for a given entity at the desired cardinality. 
func (t *Tagger) AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { - tags := t.store.LookupHashed(entityID, cardinality) + id, _ := types.NewEntityIDFromString(entityID) + tags := t.store.LookupHashed(id, cardinality) if tags.Len() == 0 { t.telemetryStore.QueriesByCardinality(cardinality).EmptyTags.Inc() @@ -84,7 +87,8 @@ func (t *Tagger) AccumulateTagsFor(entityID string, cardinality types.TagCardina // Standard returns the standard tags for a given entity. func (t *Tagger) Standard(entityID string) ([]string, error) { - tags, err := t.store.LookupStandard(entityID) + id, _ := types.NewEntityIDFromString(entityID) + tags, err := t.store.LookupStandard(id) if err != nil { return []string{}, err } @@ -127,7 +131,7 @@ func (t *Tagger) LoadState(state []types.Entity) { for _, entity := range state { t.store.ProcessTagInfo([]*types.TagInfo{{ Source: "replay", - Entity: entity.ID, + EntityID: entity.ID, HighCardTags: entity.HighCardinalityTags, OrchestratorCardTags: entity.OrchestratorCardinalityTags, LowCardTags: entity.LowCardinalityTags, @@ -141,5 +145,6 @@ func (t *Tagger) LoadState(state []types.Entity) { // GetEntity returns the entity corresponding to the specified id and an error func (t *Tagger) GetEntity(entityID string) (*types.Entity, error) { - return t.store.GetEntity(entityID) + id, _ := types.NewEntityIDFromString(entityID) + return t.store.GetEntity(id) } diff --git a/comp/core/tagger/taggerimpl/server/server.go b/comp/core/tagger/taggerimpl/server/server.go index 16d4744cb7645..946a194235579 100644 --- a/comp/core/tagger/taggerimpl/server/server.go +++ b/comp/core/tagger/taggerimpl/server/server.go @@ -16,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/proto" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" 
"github.com/DataDog/datadog-agent/pkg/util/log" @@ -125,7 +126,7 @@ func (s *Server) TaggerFetchEntity(_ context.Context, in *pb.FetchEntityRequest) return nil, status.Errorf(codes.InvalidArgument, `missing "id" parameter`) } - entityID := fmt.Sprintf("%s://%s", in.Id.Prefix, in.Id.Uid) + entityID := types.EntityIDPrefix(in.Id.Prefix).ToUID(in.Id.Uid) cardinality, err := proto.Pb2TaggerCardinality(in.GetCardinality()) if err != nil { return nil, err diff --git a/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go b/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go index 366d84085fddb..f99a8d173c448 100644 --- a/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go +++ b/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go @@ -17,8 +17,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -const ( - entityID = "foo://bar" +var ( + entityID = types.NewEntityID("foo", "bar") ) func TestSubscriber(t *testing.T) { diff --git a/comp/core/tagger/taggerimpl/tagger.go b/comp/core/tagger/taggerimpl/tagger.go index 65e75abe51ddd..87bd87086b736 100644 --- a/comp/core/tagger/taggerimpl/tagger.go +++ b/comp/core/tagger/taggerimpl/tagger.go @@ -17,10 +17,10 @@ import ( "time" api "github.com/DataDog/datadog-agent/comp/api/api/def" - configComponent "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" taggerComp "github.com/DataDog/datadog-agent/comp/core/tagger" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/local" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/remote" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/replay" @@ -33,12 +33,10 @@ import ( taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" 
"github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/common" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/fxutil" httputils "github.com/DataDog/datadog-agent/pkg/util/http" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/optional" "go.uber.org/fx" @@ -63,7 +61,7 @@ type dependencies struct { fx.In Lc fx.Lifecycle - Config configComponent.Component + Config config.Component Log log.Component Wmeta workloadmeta.Component Params taggerComp.Params @@ -105,6 +103,7 @@ type TaggerClient struct { defaultTagger taggerComp.Component wmeta workloadmeta.Component + cfg config.Component datadogConfig datadogConfig checksCardinality types.TagCardinality @@ -136,23 +135,23 @@ func newTaggerClient(deps dependencies) provides { if err != nil { deps.Log.Errorf("unable to deps.Configure the remote tagger: %s", err) - taggerClient = createTaggerClient(local.NewFakeTagger(telemetryStore), nil, deps.Log) + taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), nil, deps.Log) } else if options.Disabled { deps.Log.Errorf("remote tagger is disabled in clc runner.") - taggerClient = createTaggerClient(local.NewFakeTagger(telemetryStore), nil, deps.Log) + taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), nil, deps.Log) } else { - taggerClient = createTaggerClient(remote.NewTagger(options, telemetryStore), nil, deps.Log) + taggerClient = createTaggerClient(remote.NewTagger(options, deps.Config, telemetryStore), nil, deps.Log) } case taggerComp.NodeRemoteTaggerAgent: options, _ := remote.NodeAgentOptions(deps.Config) - taggerClient = createTaggerClient(remote.NewTagger(options, telemetryStore), nil, deps.Log) + taggerClient = 
createTaggerClient(remote.NewTagger(options, deps.Config, telemetryStore), nil, deps.Log) case taggerComp.LocalTaggerAgent: - taggerClient = createTaggerClient(local.NewTagger(deps.Wmeta, telemetryStore), nil, deps.Log) + taggerClient = createTaggerClient(local.NewTagger(deps.Config, deps.Wmeta, telemetryStore), nil, deps.Log) case taggerComp.FakeTagger: // all binaries are expected to provide their own tagger at startup. we // provide a fake tagger for testing purposes, as calling the global // tagger without proper initialization is very common there. - taggerClient = createTaggerClient(local.NewFakeTagger(telemetryStore), nil, deps.Log) + taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), nil, deps.Log) } if taggerClient != nil { @@ -191,7 +190,7 @@ func newTaggerClient(deps dependencies) provides { err = taggerClient.Start(mainCtx) if err != nil && deps.Params.FallBackToLocalIfRemoteTaggerFails { deps.Log.Warnf("Starting remote tagger failed. Falling back to local tagger: %s", err) - taggerClient.defaultTagger = local.NewTagger(deps.Wmeta, telemetryStore) + taggerClient.defaultTagger = local.NewTagger(deps.Config, deps.Wmeta, telemetryStore) // Retry to start the local tagger return taggerClient.Start(mainCtx) } @@ -229,7 +228,7 @@ func (t *TaggerClient) Stop() error { // ReplayTagger returns the replay tagger instance func (t *TaggerClient) ReplayTagger() taggerComp.ReplayTagger { - return replay.NewTagger(t.telemetryStore) + return replay.NewTagger(t.cfg, t.telemetryStore) } // GetTaggerTelemetryStore returns tagger telemetry store @@ -260,42 +259,42 @@ func (t *TaggerClient) GetEntity(entityID string) (*types.Entity, error) { // Tag queries the captureTagger (for replay scenarios) or the defaultTagger. // It can return tags at high cardinality (with tags about individual containers), // or at orchestrator cardinality (pod/task level). 
-func (t *TaggerClient) Tag(entity string, cardinality types.TagCardinality) ([]string, error) { +func (t *TaggerClient) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { // TODO: defer unlock once performance overhead of defer is negligible t.mux.RLock() if t.captureTagger != nil { - tags, err := t.captureTagger.Tag(entity, cardinality) + tags, err := t.captureTagger.Tag(entityID, cardinality) if err == nil && len(tags) > 0 { t.mux.RUnlock() return tags, nil } } t.mux.RUnlock() - return t.defaultTagger.Tag(entity, cardinality) + return t.defaultTagger.Tag(entityID, cardinality) } // AccumulateTagsFor queries the defaultTagger to get entity tags from cache or // sources and appends them to the TagsAccumulator. It can return tags at high // cardinality (with tags about individual containers), or at orchestrator // cardinality (pod/task level). -func (t *TaggerClient) AccumulateTagsFor(entity string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { +func (t *TaggerClient) AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { // TODO: defer unlock once performance overhead of defer is negligible t.mux.RLock() if t.captureTagger != nil { - err := t.captureTagger.AccumulateTagsFor(entity, cardinality, tb) + err := t.captureTagger.AccumulateTagsFor(entityID, cardinality, tb) if err == nil { t.mux.RUnlock() return nil } } t.mux.RUnlock() - return t.defaultTagger.AccumulateTagsFor(entity, cardinality, tb) + return t.defaultTagger.AccumulateTagsFor(entityID, cardinality, tb) } // GetEntityHash returns the hash for the tags associated with the given entity // Returns an empty string if the tags lookup fails -func (t *TaggerClient) GetEntityHash(entity string, cardinality types.TagCardinality) string { - tags, err := t.Tag(entity, cardinality) +func (t *TaggerClient) GetEntityHash(entityID string, cardinality types.TagCardinality) string { + tags, err := t.Tag(entityID, cardinality) 
if err != nil { return "" } @@ -304,18 +303,18 @@ func (t *TaggerClient) GetEntityHash(entity string, cardinality types.TagCardina // Standard queries the defaultTagger to get entity // standard tags (env, version, service) from cache or sources. -func (t *TaggerClient) Standard(entity string) ([]string, error) { +func (t *TaggerClient) Standard(entityID string) ([]string, error) { t.mux.RLock() // TODO(components) (tagger): captureTagger is a legacy global variable to be eliminated if t.captureTagger != nil { - tags, err := t.captureTagger.Standard(entity) + tags, err := t.captureTagger.Standard(entityID) if err == nil && len(tags) > 0 { t.mux.RUnlock() return tags, nil } } t.mux.RUnlock() - return t.defaultTagger.Standard(entity) + return t.defaultTagger.Standard(entityID) } // AgentTags returns the agent tags @@ -330,7 +329,7 @@ func (t *TaggerClient) AgentTags(cardinality types.TagCardinality) ([]string, er return nil, nil } - entityID := containers.BuildTaggerEntityName(ctrID) + entityID := types.NewEntityID(types.ContainerID, ctrID).String() return t.Tag(entityID, cardinality) } @@ -339,14 +338,14 @@ func (t *TaggerClient) AgentTags(cardinality types.TagCardinality) ([]string, er func (t *TaggerClient) GlobalTags(cardinality types.TagCardinality) ([]string, error) { t.mux.RLock() if t.captureTagger != nil { - tags, err := t.captureTagger.Tag(collectors.GlobalEntityID, cardinality) + tags, err := t.captureTagger.Tag(taggercommon.GetGlobalEntityID().String(), cardinality) if err == nil && len(tags) > 0 { t.mux.RUnlock() return tags, nil } } t.mux.RUnlock() - return t.defaultTagger.Tag(collectors.GlobalEntityID, cardinality) + return t.defaultTagger.Tag(taggercommon.GetGlobalEntityID().String(), cardinality) } // globalTagBuilder queries global tags that should apply to all data coming @@ -354,7 +353,7 @@ func (t *TaggerClient) GlobalTags(cardinality types.TagCardinality) ([]string, e func (t *TaggerClient) globalTagBuilder(cardinality types.TagCardinality, tb 
tagset.TagsAccumulator) error { t.mux.RLock() if t.captureTagger != nil { - err := t.captureTagger.AccumulateTagsFor(collectors.GlobalEntityID, cardinality, tb) + err := t.captureTagger.AccumulateTagsFor(taggercommon.GetGlobalEntityID().String(), cardinality, tb) if err == nil { t.mux.RUnlock() @@ -362,7 +361,7 @@ func (t *TaggerClient) globalTagBuilder(cardinality types.TagCardinality, tb tag } } t.mux.RUnlock() - return t.defaultTagger.AccumulateTagsFor(collectors.GlobalEntityID, cardinality, tb) + return t.defaultTagger.AccumulateTagsFor(taggercommon.GetGlobalEntityID().String(), cardinality, tb) } // List the content of the defaulTagger @@ -446,10 +445,10 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty if originInfo.FromTag != "" && originInfo.FromTag != "none" { // Check if the value is not "none" in order to avoid calling the tagger for entity that doesn't exist. // Currently only supported for pods - originFromClient = kubelet.KubePodTaggerEntityPrefix + originInfo.FromTag + originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.FromTag).String() } else if originInfo.FromTag == "" && len(originInfo.FromMsg) > 0 { // originInfo.FromMsg is the container ID sent by the newer clients. 
- originFromClient = containers.BuildTaggerEntityName(originInfo.FromMsg) + originFromClient = types.NewEntityID(types.ContainerID, originInfo.FromMsg).String() } if originFromClient != "" { @@ -466,11 +465,11 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty } } - if err := t.AccumulateTagsFor(containers.BuildTaggerEntityName(originInfo.FromMsg), cardinality, tb); err != nil { + if err := t.AccumulateTagsFor(types.ContainerID.ToUID(originInfo.FromMsg), cardinality, tb); err != nil { t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) } - if err := t.AccumulateTagsFor(kubelet.KubePodTaggerEntityPrefix+originInfo.FromTag, cardinality, tb); err != nil { + if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(originInfo.FromTag), cardinality, tb); err != nil { t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromTag, err) } @@ -503,7 +502,7 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // Accumulate tags for pod UID if parsedExternalData.podUID != "" { - if err := t.AccumulateTagsFor(kubelet.KubePodTaggerEntityPrefix+parsedExternalData.podUID, cardinality, tb); err != nil { + if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(parsedExternalData.podUID), cardinality, tb); err != nil { t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) } } @@ -516,7 +515,7 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // Accumulate tags for generated container ID if generatedContainerID != "" { - if err := t.AccumulateTagsFor(containers.BuildTaggerEntityName(generatedContainerID), cardinality, tb); err != nil { + if err := t.AccumulateTagsFor(types.ContainerID.ToUID(generatedContainerID), cardinality, tb); err != nil { t.log.Tracef("Cannot get tags for entity %s: %s", generatedContainerID, err) } } @@ -577,7 +576,7 @@ type optionalTaggerDeps struct { fx.In Lc fx.Lifecycle - Config configComponent.Component + 
Config config.Component Log log.Component Wmeta optional.Option[workloadmeta.Component] Telemetry coretelemetry.Component diff --git a/comp/core/tagger/taggerimpl/tagger_test.go b/comp/core/tagger/taggerimpl/tagger_test.go index ddbe5a5c36760..600fdfd631685 100644 --- a/comp/core/tagger/taggerimpl/tagger_test.go +++ b/comp/core/tagger/taggerimpl/tagger_test.go @@ -17,9 +17,7 @@ import ( configmock "github.com/DataDog/datadog-agent/pkg/config/mock" taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" "github.com/DataDog/datadog-agent/pkg/tagset" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) // TODO Improve test coverage with dogstatsd/enrich tests once Origin Detection is refactored. @@ -43,8 +41,8 @@ func TestEnrichTags(t *testing.T) { defer fakeTagger.ResetTagger() // Fill fake tagger with entities - fakeTagger.SetTags(kubelet.KubePodTaggerEntityPrefix+"pod", "host", []string{"pod-low"}, []string{"pod-orch"}, []string{"pod-high"}, []string{"pod-std"}) - fakeTagger.SetTags(containers.BuildTaggerEntityName("container"), kubelet.KubePodTaggerEntityPrefix+"pod", []string{"container-low"}, []string{"container-orch"}, []string{"container-high"}, []string{"container-std"}) + fakeTagger.SetTags(types.KubernetesPodUID.ToUID("pod"), "host", []string{"pod-low"}, []string{"pod-orch"}, []string{"pod-high"}, []string{"pod-std"}) + fakeTagger.SetTags(types.ContainerID.ToUID("container"), "host", []string{"container-low"}, []string{"container-orch"}, []string{"container-high"}, []string{"container-std"}) for _, tt := range []struct { name string @@ -94,9 +92,9 @@ func TestEnrichTags(t *testing.T) { func TestEnrichTagsOrchestrator(t *testing.T) { fakeTagger := fxutil.Test[tagger.Mock](t, MockModule()) defer fakeTagger.ResetTagger() - fakeTagger.SetTags("foo", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) + 
fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo", Cardinality: "orchestrator"}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://bar", Cardinality: "orchestrator"}) assert.Equal(t, []string{"lowTag", "orchTag"}, tb.Get()) } @@ -105,11 +103,11 @@ func TestEnrichTagsOptOut(t *testing.T) { defer fakeTagger.ResetTagger() cfg := configmock.New(t) cfg.SetWithoutSource("dogstatsd_origin_optout_enabled", true) - fakeTagger.SetTags("foo", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) + fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "originID", FromTag: "pod-uid", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromTag: "pod-uid", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "originID", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) } diff --git a/comp/core/tagger/taggerimpl/tagstore/entity_tags.go b/comp/core/tagger/taggerimpl/tagstore/entity_tags.go index 89748baf055ff..a5584254fd12b 100644 --- a/comp/core/tagger/taggerimpl/tagstore/entity_tags.go +++ b/comp/core/tagger/taggerimpl/tagstore/entity_tags.go @@ -40,6 +40,7 @@ import ( // EntityTags holds the tag information for a given entity. 
type EntityTags interface { toEntity() types.Entity + getEntityID() types.EntityID getStandard() []string getHashedTags(cardinality types.TagCardinality) tagset.HashedTags tagsForSource(source string) *sourceTags @@ -56,7 +57,7 @@ type EntityTags interface { // not be shared outside of the store. Usage inside the store is safe since it // relies on a global lock. type EntityTagsWithMultipleSources struct { - entityID string + entityID types.EntityID sourceTags map[string]sourceTags cacheValid bool cachedAll tagset.HashedTags // Low + orchestrator + high @@ -64,7 +65,7 @@ type EntityTagsWithMultipleSources struct { cachedLow tagset.HashedTags // Sub-slice of cachedAll } -func newEntityTags(entityID string, source string) EntityTags { +func newEntityTags(entityID types.EntityID, source string) EntityTags { if flavor.GetFlavor() == flavor.ClusterAgent { return newEntityTagsWithSingleSource(entityID, source) } @@ -76,6 +77,9 @@ func newEntityTags(entityID string, source string) EntityTags { } } +func (e *EntityTagsWithMultipleSources) getEntityID() types.EntityID { + return e.entityID +} func (e *EntityTagsWithMultipleSources) toEntity() types.Entity { e.computeCache() @@ -251,7 +255,7 @@ func (e *EntityTagsWithMultipleSources) setSourceExpiration(source string, expir // not be shared outside of the store. Usage inside the store is safe since it // relies on a global lock. 
type EntityTagsWithSingleSource struct { - entityID string + entityID types.EntityID source string expiryDate time.Time standardTags []string @@ -261,13 +265,17 @@ type EntityTagsWithSingleSource struct { isExpired bool } -func newEntityTagsWithSingleSource(entityID string, source string) *EntityTagsWithSingleSource { +func newEntityTagsWithSingleSource(entityID types.EntityID, source string) *EntityTagsWithSingleSource { return &EntityTagsWithSingleSource{ entityID: entityID, source: source, } } +func (e *EntityTagsWithSingleSource) getEntityID() types.EntityID { + return e.entityID +} + func (e *EntityTagsWithSingleSource) toEntity() types.Entity { cachedAll := e.cachedAll.Get() cachedOrchestrator := e.cachedOrchestrator.Get() diff --git a/comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go b/comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go index d41e99e7c7a5b..c93d764377aaf 100644 --- a/comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go +++ b/comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go @@ -15,11 +15,12 @@ import ( ) const ( - testEntityID = "testEntityID" testSource = "testSource" invalidSource = "invalidSource" ) +var testEntityID = types.NewEntityID("test", "EntityID") + func TestToEntity(t *testing.T) { entityTags := newEntityTagsWithSingleSource(testEntityID, testSource) diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore.go b/comp/core/tagger/taggerimpl/tagstore/tagstore.go index f5ac0bcd22855..ddd07cb17252b 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore.go @@ -16,12 +16,13 @@ import ( "github.com/benbjohnson/clock" + "github.com/DataDog/datadog-agent/comp/core/config" + genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/generic_store" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/subscriber" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" 
"github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/tagset" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,27 +38,29 @@ var ErrNotFound = errors.New("entity not found") type TagStore struct { sync.RWMutex - store map[string]EntityTags + store types.ObjectStore[EntityTags] telemetry map[string]map[string]float64 subscriber *subscriber.Subscriber clock clock.Clock + cfg config.Component telemetryStore *telemetry.Store } -// NewTagStore creates new TagStore. -func NewTagStore(telemetryStore *telemetry.Store) *TagStore { - return newTagStoreWithClock(clock.New(), telemetryStore) +// NewTagStore creates new LocalTaggerTagStore. +func NewTagStore(cfg config.Component, telemetryStore *telemetry.Store) *TagStore { + return newTagStoreWithClock(cfg, clock.New(), telemetryStore) } -func newTagStoreWithClock(clock clock.Clock, telemetryStore *telemetry.Store) *TagStore { +func newTagStoreWithClock(cfg config.Component, clock clock.Clock, telemetryStore *telemetry.Store) *TagStore { return &TagStore{ telemetry: make(map[string]map[string]float64), - store: make(map[string]EntityTags), + store: genericstore.NewObjectStore[EntityTags](cfg), subscriber: subscriber.NewSubscriber(telemetryStore), clock: clock, + cfg: cfg, telemetryStore: telemetryStore, } } @@ -99,7 +102,7 @@ func (s *TagStore) ProcessTagInfo(tagInfos []*types.TagInfo) { log.Tracef("ProcessTagInfo err: skipping nil message") continue } - if info.Entity == "" { + if info.EntityID.String() == "" { log.Tracef("ProcessTagInfo err: empty entity name, skipping message") continue } @@ -108,7 +111,7 @@ func (s *TagStore) ProcessTagInfo(tagInfos []*types.TagInfo) { continue } - storedTags, exist := s.store[info.Entity] + storedTags, exist := s.store.Get(info.EntityID) if info.DeleteEntity { if exist { @@ -133,8 +136,8 @@ func (s *TagStore) ProcessTagInfo(tagInfos []*types.TagInfo) { } } else { eventType = 
types.EventTypeAdded - storedTags = newEntityTags(info.Entity, info.Source) - s.store[info.Entity] = storedTags + storedTags = newEntityTags(info.EntityID, info.Source) + s.store.Set(info.EntityID, storedTags) } s.telemetryStore.UpdatedEntities.Inc() @@ -160,17 +163,17 @@ func (s *TagStore) collectTelemetry() { s.Lock() defer s.Unlock() - for entityName, entityTags := range s.store { - prefix, _ := containers.SplitEntityName(entityName) + s.store.ForEach(func(_ types.EntityID, et EntityTags) { + prefix := string(et.getEntityID().GetPrefix()) - for _, source := range entityTags.sources() { + for _, source := range et.sources() { if _, ok := s.telemetry[prefix]; !ok { s.telemetry[prefix] = make(map[string]float64) } s.telemetry[prefix][source]++ } - } + }) for prefix, sources := range s.telemetry { for source, storedEntities := range sources { @@ -187,13 +190,14 @@ func (s *TagStore) Subscribe(cardinality types.TagCardinality) chan []types.Enti s.RLock() defer s.RUnlock() - events := make([]types.EntityEvent, 0, len(s.store)) - for _, storedTags := range s.store { + events := make([]types.EntityEvent, 0, s.store.Size()) + + s.store.ForEach(func(_ types.EntityID, et EntityTags) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, - Entity: storedTags.toEntity(), + Entity: et.toEntity(), }) - } + }) return s.subscriber.Subscribe(cardinality, events) } @@ -216,27 +220,27 @@ func (s *TagStore) Prune() { now := s.clock.Now() events := []types.EntityEvent{} - for entity, storedTags := range s.store { - changed := storedTags.deleteExpired(now) + s.store.ForEach(func(eid types.EntityID, et EntityTags) { + changed := et.deleteExpired(now) - if !changed && !storedTags.shouldRemove() { - continue + if !changed && !et.shouldRemove() { + return } - if storedTags.shouldRemove() { + if et.shouldRemove() { s.telemetryStore.PrunedEntities.Inc() - delete(s.store, entity) + s.store.Unset(eid) events = append(events, types.EntityEvent{ EventType: 
types.EventTypeDeleted, - Entity: storedTags.toEntity(), + Entity: et.toEntity(), }) } else { events = append(events, types.EntityEvent{ EventType: types.EventTypeModified, - Entity: storedTags.toEntity(), + Entity: et.toEntity(), }) } - } + }) if len(events) > 0 { s.notifySubscribers(events) @@ -244,10 +248,10 @@ func (s *TagStore) Prune() { } // LookupHashed gets tags from the store and returns them as a HashedTags instance. -func (s *TagStore) LookupHashed(entity string, cardinality types.TagCardinality) tagset.HashedTags { +func (s *TagStore) LookupHashed(entityID types.EntityID, cardinality types.TagCardinality) tagset.HashedTags { s.RLock() defer s.RUnlock() - storedTags, present := s.store[entity] + storedTags, present := s.store.Get(entityID) if !present { return tagset.HashedTags{} @@ -256,12 +260,12 @@ func (s *TagStore) LookupHashed(entity string, cardinality types.TagCardinality) } // Lookup gets tags from the store and returns them concatenated in a string slice. -func (s *TagStore) Lookup(entity string, cardinality types.TagCardinality) []string { - return s.LookupHashed(entity, cardinality).Get() +func (s *TagStore) Lookup(entityID types.EntityID, cardinality types.TagCardinality) []string { + return s.LookupHashed(entityID, cardinality).Get() } // LookupStandard returns the standard tags recorded for a given entity -func (s *TagStore) LookupStandard(entityID string) ([]string, error) { +func (s *TagStore) LookupStandard(entityID types.EntityID) ([]string, error) { storedTags, err := s.getEntityTags(entityID) if err != nil { return nil, err @@ -279,8 +283,8 @@ func (s *TagStore) List() types.TaggerListResponse { s.RLock() defer s.RUnlock() - for entityID, et := range s.store { - r.Entities[entityID] = types.TaggerListEntity{ + for _, et := range s.store.ListObjects() { + r.Entities[et.getEntityID().String()] = types.TaggerListEntity{ Tags: et.tagsBySource(), } } @@ -289,7 +293,7 @@ func (s *TagStore) List() types.TaggerListResponse { } // GetEntity 
returns the entity corresponding to the specified id and an error -func (s *TagStore) GetEntity(entityID string) (*types.Entity, error) { +func (s *TagStore) GetEntity(entityID types.EntityID) (*types.Entity, error) { tags, err := s.getEntityTags(entityID) if err != nil { return nil, err @@ -299,11 +303,11 @@ func (s *TagStore) GetEntity(entityID string) (*types.Entity, error) { return &entity, nil } -func (s *TagStore) getEntityTags(entityID string) (EntityTags, error) { +func (s *TagStore) getEntityTags(entityID types.EntityID) (EntityTags, error) { s.RLock() defer s.RUnlock() - storedTags, present := s.store[entityID] + storedTags, present := s.store.Get(entityID) if !present { return nil, ErrNotFound } diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go index 3dd7c6fc0cb22..9e64b8f314e0b 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go @@ -6,6 +6,7 @@ package tagstore import ( + "fmt" "sync" "testing" "time" @@ -20,13 +21,14 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) type StoreTestSuite struct { suite.Suite - clock *clock.Mock - store *TagStore + clock *clock.Mock + tagstore *TagStore } func (s *StoreTestSuite) SetupTest() { @@ -35,52 +37,61 @@ func (s *StoreTestSuite) SetupTest() { s.clock = clock.NewMock() // set the mock clock to the current time s.clock.Add(time.Since(time.Unix(0, 0))) - s.store = newTagStoreWithClock(s.clock, telemetryStore) + + mockConfig := configmock.New(s.T()) + fmt.Println("New Checkpoint: ", mockConfig) + s.tagstore = newTagStoreWithClock(mockConfig, s.clock, telemetryStore) } func (s *StoreTestSuite) TestIngest() { - 
s.store.ProcessTagInfo([]*types.TagInfo{ + entityID := types.NewEntityID("", "test") + + s.tagstore.ProcessTagInfo([]*types.TagInfo{ { Source: "source1", - Entity: "test", + EntityID: entityID, LowCardTags: []string{"tag"}, OrchestratorCardTags: []string{"tag"}, HighCardTags: []string{"tag"}, }, { Source: "source2", - Entity: "test", + EntityID: entityID, LowCardTags: []string{"tag"}, }, }) - assert.Len(s.T(), s.store.store, 1) - assert.Len(s.T(), s.store.store["test"].sources(), 2) + assert.Equalf(s.T(), s.tagstore.store.Size(), 1, "expected tagstore to contain 1 TagEntity, but found: s.tagstore.store.size()") + + storedTags, exists := s.tagstore.store.Get(entityID) + require.True(s.T(), exists) + assert.Len(s.T(), storedTags.sources(), 2) } func (s *StoreTestSuite) TestLookup() { - s.store.ProcessTagInfo([]*types.TagInfo{ + entityID := types.NewEntityID("", "test") + s.tagstore.ProcessTagInfo([]*types.TagInfo{ { Source: "source1", - Entity: "test", + EntityID: entityID, LowCardTags: []string{"tag"}, HighCardTags: []string{"tag"}, }, { Source: "source2", - Entity: "test", + EntityID: entityID, LowCardTags: []string{"tag"}, }, { Source: "source3", - Entity: "test", + EntityID: entityID, OrchestratorCardTags: []string{"tag"}, }, }) - tagsHigh := s.store.Lookup("test", types.HighCardinality) - tagsOrch := s.store.Lookup("test", types.OrchestratorCardinality) - tagsLow := s.store.Lookup("test", types.LowCardinality) + tagsHigh := s.tagstore.Lookup(entityID, types.HighCardinality) + tagsOrch := s.tagstore.Lookup(entityID, types.OrchestratorCardinality) + tagsLow := s.tagstore.Lookup(entityID, types.LowCardinality) assert.Len(s.T(), tagsHigh, 4) assert.Len(s.T(), tagsLow, 2) @@ -88,54 +99,60 @@ func (s *StoreTestSuite) TestLookup() { } func (s *StoreTestSuite) TestLookupStandard() { - s.store.ProcessTagInfo([]*types.TagInfo{ + entityID := types.NewEntityID("", "test") + + s.tagstore.ProcessTagInfo([]*types.TagInfo{ { Source: "source1", - Entity: "test", + EntityID: 
entityID, LowCardTags: []string{"tag", "env:dev"}, StandardTags: []string{"env:dev"}, }, { Source: "source2", - Entity: "test", + EntityID: entityID, LowCardTags: []string{"tag", "service:foo"}, StandardTags: []string{"service:foo"}, }, }) - standard, err := s.store.LookupStandard("test") + standard, err := s.tagstore.LookupStandard(entityID) assert.Nil(s.T(), err) assert.Len(s.T(), standard, 2) assert.Contains(s.T(), standard, "env:dev") assert.Contains(s.T(), standard, "service:foo") - _, err = s.store.LookupStandard("not found") + _, err = s.tagstore.LookupStandard(types.NewEntityID("not", "found")) assert.NotNil(s.T(), err) } func (s *StoreTestSuite) TestLookupNotPresent() { - tags := s.store.Lookup("test", types.LowCardinality) + entityID := types.NewEntityID("", "test") + tags := s.tagstore.Lookup(entityID, types.LowCardinality) assert.Nil(s.T(), tags) } func (s *StoreTestSuite) TestPrune__deletedEntities() { - s.store.ProcessTagInfo([]*types.TagInfo{ + entityID1 := types.NewEntityID("", "test1") + entityID2 := types.NewEntityID("", "test2") + + s.tagstore.ProcessTagInfo([]*types.TagInfo{ // Adds { Source: "source1", - Entity: "test1", + EntityID: entityID1, LowCardTags: []string{"s1tag"}, OrchestratorCardTags: []string{"s1tag"}, HighCardTags: []string{"s1tag"}, }, { Source: "source2", - Entity: "test1", + EntityID: entityID1, HighCardTags: []string{"s2tag"}, }, { Source: "source1", - Entity: "test2", + EntityID: entityID2, LowCardTags: []string{"tag"}, HighCardTags: []string{"tag"}, }, @@ -143,119 +160,132 @@ func (s *StoreTestSuite) TestPrune__deletedEntities() { // Deletion, to be batched { Source: "source1", - Entity: "test1", + EntityID: entityID1, DeleteEntity: true, }, }) // Data should still be in the store - tagsHigh := s.store.Lookup("test1", types.HighCardinality) + tagsHigh := s.tagstore.Lookup(entityID1, types.HighCardinality) assert.Len(s.T(), tagsHigh, 4) - tagsOrch := s.store.Lookup("test1", types.OrchestratorCardinality) + tagsOrch := 
s.tagstore.Lookup(entityID1, types.OrchestratorCardinality) assert.Len(s.T(), tagsOrch, 2) - tagsHigh = s.store.Lookup("test2", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID2, types.HighCardinality) assert.Len(s.T(), tagsHigh, 2) s.clock.Add(10 * time.Minute) - s.store.Prune() + s.tagstore.Prune() // test1 should only have tags from source2, source1 should be removed - tagsHigh = s.store.Lookup("test1", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID1, types.HighCardinality) assert.Len(s.T(), tagsHigh, 1) - tagsOrch = s.store.Lookup("test1", types.OrchestratorCardinality) + tagsOrch = s.tagstore.Lookup(entityID1, types.OrchestratorCardinality) assert.Len(s.T(), tagsOrch, 0) // test2 should still be present - tagsHigh = s.store.Lookup("test2", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID2, types.HighCardinality) assert.Len(s.T(), tagsHigh, 2) - s.store.ProcessTagInfo([]*types.TagInfo{ + s.tagstore.ProcessTagInfo([]*types.TagInfo{ // re-add tags from removed source, then remove another one { Source: "source1", - Entity: "test1", + EntityID: entityID1, LowCardTags: []string{"s1tag"}, }, // Deletion, to be batched { Source: "source2", - Entity: "test1", + EntityID: entityID1, DeleteEntity: true, }, }) s.clock.Add(10 * time.Minute) - s.store.Prune() + s.tagstore.Prune() - tagsHigh = s.store.Lookup("test1", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID1, types.HighCardinality) assert.Len(s.T(), tagsHigh, 1) - tagsHigh = s.store.Lookup("test2", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID2, types.HighCardinality) assert.Len(s.T(), tagsHigh, 2) } func (s *StoreTestSuite) TestPrune__emptyEntries() { - s.store.ProcessTagInfo([]*types.TagInfo{ + entityID1 := types.NewEntityID("", "test1") + entityID2 := types.NewEntityID("", "test2") + entityID3 := types.NewEntityID("", "test3") + emptyEntityID1 := types.NewEntityID("", "emptyEntity1") + emptyEntityID2 := types.NewEntityID("", "emptyEntity2") 
+ + s.tagstore.ProcessTagInfo([]*types.TagInfo{ { Source: "source1", - Entity: "test1", + EntityID: entityID1, LowCardTags: []string{"s1tag"}, OrchestratorCardTags: []string{"s1tag"}, HighCardTags: []string{"s1tag"}, }, { Source: "source2", - Entity: "test2", + EntityID: entityID2, HighCardTags: []string{"s2tag"}, }, { Source: "emptySource1", - Entity: "emptyEntity1", + EntityID: emptyEntityID1, LowCardTags: []string{}, }, { Source: "emptySource2", - Entity: "emptyEntity2", + EntityID: emptyEntityID2, StandardTags: []string{}, }, { Source: "emptySource3", - Entity: "test3", + EntityID: entityID3, LowCardTags: []string{}, }, { Source: "source3", - Entity: "test3", + EntityID: entityID3, LowCardTags: []string{"s3tag"}, }, }) - assert.Len(s.T(), s.store.store, 5) - s.store.Prune() - assert.Len(s.T(), s.store.store, 3) + tagStoreSize := s.tagstore.store.Size() + assert.Equalf(s.T(), tagStoreSize, 5, "should have 5 item(s), but has %d", tagStoreSize) + + s.tagstore.Prune() + + tagStoreSize = s.tagstore.store.Size() + assert.Equalf(s.T(), tagStoreSize, 3, "should have 3 item(s), but has %d", tagStoreSize) // Assert non-empty tags aren't deleted - tagsHigh := s.store.Lookup("test1", types.HighCardinality) + tagsHigh := s.tagstore.Lookup(entityID1, types.HighCardinality) assert.Len(s.T(), tagsHigh, 3) - tagsOrch := s.store.Lookup("test1", types.OrchestratorCardinality) + tagsOrch := s.tagstore.Lookup(entityID1, types.OrchestratorCardinality) assert.Len(s.T(), tagsOrch, 2) - tagsHigh = s.store.Lookup("test2", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityID2, types.HighCardinality) assert.Len(s.T(), tagsHigh, 1) - tagsLow := s.store.Lookup("test3", types.LowCardinality) + tagsLow := s.tagstore.Lookup(entityID3, types.LowCardinality) assert.Len(s.T(), tagsLow, 1) // Assert empty entities are deleted - emptyTags1 := s.store.Lookup("emptyEntity1", types.HighCardinality) + emptyTags1 := s.tagstore.Lookup(emptyEntityID1, types.HighCardinality) assert.Len(s.T(), 
emptyTags1, 0) - emptyTags2 := s.store.Lookup("emptyEntity2", types.HighCardinality) + emptyTags2 := s.tagstore.Lookup(emptyEntityID2, types.HighCardinality) assert.Len(s.T(), emptyTags2, 0) } func (s *StoreTestSuite) TestList() { - s.store.ProcessTagInfo( + entityID1 := types.NewEntityID("", "entity-1") + entityID2 := types.NewEntityID("", "entity-2") + + s.tagstore.ProcessTagInfo( []*types.TagInfo{ { Source: "source-1", - Entity: "entity-1", + EntityID: entityID1, HighCardTags: []string{"h1:v1", "h2:v2"}, OrchestratorCardTags: []string{"o1:v1", "o2:v2"}, LowCardTags: []string{"l1:v1", "l2:v2", "service:s1"}, @@ -263,7 +293,7 @@ func (s *StoreTestSuite) TestList() { }, { Source: "source-1", - Entity: "entity-2", + EntityID: entityID2, HighCardTags: []string{"h3:v3", "h4:v4"}, OrchestratorCardTags: []string{"o3:v3", "o4:v4"}, LowCardTags: []string{"l3:v3", "l4:v4", "service:s1"}, @@ -272,10 +302,10 @@ func (s *StoreTestSuite) TestList() { }, ) - resultList := s.store.List() + resultList := s.tagstore.List() require.Equal(s.T(), 2, len(resultList.Entities)) - entity1, ok := resultList.Entities["entity-1"] + entity1, ok := resultList.Entities[entityID1.String()] require.True(s.T(), ok) require.Equal(s.T(), 1, len(entity1.Tags)) require.ElementsMatch( // Tags order is not important @@ -284,7 +314,7 @@ func (s *StoreTestSuite) TestList() { []string{"l1:v1", "l2:v2", "service:s1", "o1:v1", "o2:v2", "h1:v1", "h2:v2"}, ) - entity2, ok := resultList.Entities["entity-2"] + entity2, ok := resultList.Entities[entityID2.String()] require.True(s.T(), ok) require.Equal(s.T(), 1, len(entity2.Tags)) require.ElementsMatch( // Tags order is not important @@ -295,14 +325,15 @@ func (s *StoreTestSuite) TestList() { } func (s *StoreTestSuite) TestGetEntity() { - _, err := s.store.GetEntity("entity-1") + entityID1 := types.NewEntityID("", "entity-1") + _, err := s.tagstore.GetEntity(entityID1) require.Error(s.T(), err) - s.store.ProcessTagInfo( + s.tagstore.ProcessTagInfo( 
[]*types.TagInfo{ { Source: "source-1", - Entity: "entity-1", + EntityID: entityID1, HighCardTags: []string{"h1:v1", "h2:v2"}, OrchestratorCardTags: []string{"o1:v1", "o2:v2"}, LowCardTags: []string{"l1:v1", "l2:v2", "service:s1"}, @@ -311,12 +342,12 @@ func (s *StoreTestSuite) TestGetEntity() { }, ) - entity, err := s.store.GetEntity("entity-1") + entity, err := s.tagstore.GetEntity(entityID1) require.NoError(s.T(), err) assert.Equal( s.T(), &types.Entity{ - ID: "entity-1", + ID: entityID1, HighCardinalityTags: []string{"h1:v1", "h2:v2"}, OrchestratorCardinalityTags: []string{"o1:v1", "o2:v2"}, LowCardinalityTags: []string{"l1:v1", "l2:v2", "service:s1"}, @@ -331,27 +362,29 @@ func TestStoreSuite(t *testing.T) { } func (s *StoreTestSuite) TestGetExpiredTags() { - s.store.ProcessTagInfo([]*types.TagInfo{ + entityIDA := types.NewEntityID("", "entityA") + entityIDB := types.NewEntityID("", "entityB") + s.tagstore.ProcessTagInfo([]*types.TagInfo{ { Source: "source", - Entity: "entityA", + EntityID: types.NewEntityID("", "entityA"), HighCardTags: []string{"expired"}, ExpiryDate: s.clock.Now().Add(-10 * time.Second), }, { Source: "source", - Entity: "entityB", + EntityID: types.NewEntityID("", "entityB"), HighCardTags: []string{"expiresSoon"}, ExpiryDate: s.clock.Now().Add(10 * time.Second), }, }) - s.store.Prune() + s.tagstore.Prune() - tagsHigh := s.store.Lookup("entityB", types.HighCardinality) + tagsHigh := s.tagstore.Lookup(entityIDB, types.HighCardinality) assert.Contains(s.T(), tagsHigh, "expiresSoon") - tagsHigh = s.store.Lookup("entityA", types.HighCardinality) + tagsHigh = s.tagstore.Lookup(entityIDA, types.HighCardinality) assert.NotContains(s.T(), tagsHigh, "expired") } @@ -367,7 +400,7 @@ func (s *StoreTestSuite) TestDuplicateSourceTags() { collectors.CollectorPriorities = originalCollectorPriorities }() - testEntity := "testEntity" + testEntityID := types.NewEntityID("", "testEntityID") // Mock collector priorities collectors.CollectorPriorities = 
map[string]types.CollectorPriority{ @@ -378,39 +411,39 @@ func (s *StoreTestSuite) TestDuplicateSourceTags() { nodeRuntimeTags := types.TagInfo{ Source: "sourceNodeRuntime", - Entity: testEntity, + EntityID: testEntityID, LowCardTags: []string{"foo", "tag1:sourceLow", "tag2:sourceLow"}, HighCardTags: []string{"tag3:sourceLow", "tag5:sourceLow"}, } nodeOrchestractorTags := types.TagInfo{ Source: "sourceNodeOrchestrator", - Entity: testEntity, + EntityID: testEntityID, LowCardTags: []string{"bar", "tag1:sourceHigh", "tag2:sourceHigh"}, HighCardTags: []string{"tag3:sourceHigh", "tag4:sourceHigh"}, } clusterOrchestratorTags := types.TagInfo{ Source: "sourceClusterOrchestrator", - Entity: testEntity, + EntityID: testEntityID, LowCardTags: []string{"tag1:sourceClusterLow", "tag3:sourceClusterHigh"}, HighCardTags: []string{"tag4:sourceClusterLow"}, } - s.store.ProcessTagInfo([]*types.TagInfo{ + s.tagstore.ProcessTagInfo([]*types.TagInfo{ &nodeRuntimeTags, &nodeOrchestractorTags, &clusterOrchestratorTags, }) - lowCardTags := s.store.Lookup(testEntity, types.LowCardinality) + lowCardTags := s.tagstore.Lookup(testEntityID, types.LowCardinality) assert.ElementsMatch( s.T(), lowCardTags, []string{"foo", "bar", "tag1:sourceClusterLow", "tag2:sourceHigh", "tag3:sourceClusterHigh"}, ) - highCardTags := s.store.Lookup(testEntity, types.HighCardinality) + highCardTags := s.tagstore.Lookup(testEntityID, types.HighCardinality) assert.ElementsMatch( s.T(), highCardTags, @@ -420,7 +453,7 @@ func (s *StoreTestSuite) TestDuplicateSourceTags() { type entityEventExpectation struct { eventType types.EventType - id string + id types.EntityID lowCardTags []string orchCardTags []string highCardTags []string @@ -430,23 +463,26 @@ func TestSubscribe(t *testing.T) { tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) telemetryStore := taggerTelemetry.NewStore(tel) clock := clock.NewMock() - store := newTagStoreWithClock(clock, telemetryStore) + mockConfig := configmock.New(t) + 
store := newTagStoreWithClock(mockConfig, clock, telemetryStore) collectors.CollectorPriorities["source2"] = types.ClusterOrchestrator collectors.CollectorPriorities["source"] = types.NodeRuntime + entityID1 := types.NewEntityID("", "test1") + entityID2 := types.NewEntityID("", "test2") var expectedEvents = []entityEventExpectation{ - {types.EventTypeAdded, "test1", []string{"low"}, []string{}, []string{"high"}}, - {types.EventTypeModified, "test1", []string{"low"}, []string{"orch"}, []string{"high:1", "high:2"}}, - {types.EventTypeAdded, "test2", []string{"low"}, []string{}, []string{"high"}}, - {types.EventTypeModified, "test1", []string{"low"}, []string{}, []string{"high"}}, - {types.EventTypeDeleted, "test1", nil, nil, nil}, + {types.EventTypeAdded, entityID1, []string{"low"}, []string{}, []string{"high"}}, + {types.EventTypeModified, entityID1, []string{"low"}, []string{"orch"}, []string{"high:1", "high:2"}}, + {types.EventTypeAdded, entityID2, []string{"low"}, []string{}, []string{"high"}}, + {types.EventTypeModified, entityID1, []string{"low"}, []string{}, []string{"high"}}, + {types.EventTypeDeleted, entityID1, nil, nil, nil}, } store.ProcessTagInfo([]*types.TagInfo{ { Source: "source", - Entity: "test1", + EntityID: types.NewEntityID("", "test1"), LowCardTags: []string{"low"}, HighCardTags: []string{"high"}, }, @@ -461,19 +497,19 @@ func TestSubscribe(t *testing.T) { store.ProcessTagInfo([]*types.TagInfo{ { Source: "source2", - Entity: "test1", + EntityID: entityID1, LowCardTags: []string{"low"}, OrchestratorCardTags: []string{"orch"}, HighCardTags: []string{"high:1", "high:2"}, }, { Source: "source2", - Entity: "test1", + EntityID: entityID1, DeleteEntity: true, }, { Source: "source", - Entity: "test2", + EntityID: entityID2, LowCardTags: []string{"low"}, HighCardTags: []string{"high"}, }, @@ -485,7 +521,7 @@ func TestSubscribe(t *testing.T) { store.ProcessTagInfo([]*types.TagInfo{ { Source: "source", - Entity: "test1", + EntityID: entityID1, 
DeleteEntity: true, }, }) diff --git a/comp/core/tagger/types/entity_id.go b/comp/core/tagger/types/entity_id.go new file mode 100644 index 0000000000000..62e44eb75f2f9 --- /dev/null +++ b/comp/core/tagger/types/entity_id.go @@ -0,0 +1,131 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package types defines types used by the Tagger component. +package types + +import ( + "fmt" + "strings" + + taggerutils "github.com/DataDog/datadog-agent/pkg/util/tagger" +) + +const separator = "://" + +// EntityID represents a tagger entityID +// An EntityID should be identified by a prefix and an id, and is represented as {prefix}://{id} +type EntityID interface { + // GetID returns a prefix-specific id (i.e. an ID unique given prefix) + GetID() string + // GetPrefix returns the prefix of the EntityID + GetPrefix() EntityIDPrefix + // String returns a string representation of EntityID under the format {prefix}://{id} + String() string +} + +// defaultEntityID implements EntityID as a plain string id +type defaultEntityID string + +// GetID implements EntityID#GetID +func (de defaultEntityID) GetID() string { + parts := strings.SplitN(string(de), separator, 2) + + if len(parts) != 2 { + return "" + } + + return parts[1] +} + +// GetPrefix implements EntityID#GetPrefix +func (de defaultEntityID) GetPrefix() EntityIDPrefix { + parts := strings.SplitN(string(de), separator, 2) + + if len(parts) != 2 { + return "" + } + + return EntityIDPrefix(parts[0]) +} + +// String implements EntityID#String +func (de defaultEntityID) String() string { + return string(de) +} + +func newDefaultEntityID(id string) EntityID { + return defaultEntityID(id) +} + +// compositeEntityID implements EntityID as a struct of prefix and id +type compositeEntityID struct { + Prefix EntityIDPrefix + ID 
string +} + +// GetPrefix implements EntityID#GetPrefix +func (eid compositeEntityID) GetPrefix() EntityIDPrefix { + return eid.Prefix +} + +// GetID implements EntityID#GetID +func (eid compositeEntityID) GetID() string { + return eid.ID +} + +// String implements EntityID#String +func (eid compositeEntityID) String() string { + return eid.Prefix.ToUID(eid.ID) +} + +// newcompositeEntityID returns a new EntityID based on a prefix and an id +func newCompositeEntityID(prefix EntityIDPrefix, id string) EntityID { + return compositeEntityID{ + Prefix: prefix, + ID: id, + } +} + +// NewEntityID builds and returns an EntityID object based on plain string uid +// Currently, it defaults to the default implementation of EntityID as a plain string +func NewEntityID(prefix EntityIDPrefix, id string) EntityID { + // TODO: use composite entity id always or use component framework for config component + if taggerutils.ShouldUseCompositeStore() { + return newCompositeEntityID(prefix, id) + } + return newDefaultEntityID(fmt.Sprintf("%s://%s", prefix, id)) +} + +// NewEntityIDFromString constructs EntityID from a plain string id +func NewEntityIDFromString(plainStringID string) (EntityID, error) { + if taggerutils.ShouldUseCompositeStore() { + if !strings.Contains(plainStringID, separator) { + return nil, fmt.Errorf("unsupported tagger entity id format %q, correct format is `{prefix}://{id}`", plainStringID) + } + parts := strings.Split(plainStringID, separator) + return newCompositeEntityID(EntityIDPrefix(parts[0]), parts[1]), nil + } + return newDefaultEntityID(plainStringID), nil +} + +const ( + // ContainerID is the prefix `container_id` + ContainerID EntityIDPrefix = "container_id" + // ContainerImageMetadata is the prefix `container_image_metadata` + ContainerImageMetadata EntityIDPrefix = "container_image_metadata" + // ECSTask is the prefix `ecs_task` + ECSTask EntityIDPrefix = "ecs_task" + // Host is the prefix `host` + Host EntityIDPrefix = "host" + // 
KubernetesDeployment is the prefix `deployment` + KubernetesDeployment EntityIDPrefix = "deployment" + // KubernetesMetadata is the prefix `kubernetes_metadata` + KubernetesMetadata EntityIDPrefix = "kubernetes_metadata" + // KubernetesPodUID is the prefix `kubernetes_pod_uid` + KubernetesPodUID EntityIDPrefix = "kubernetes_pod_uid" + // Process is the prefix `process` + Process EntityIDPrefix = "process" +) diff --git a/comp/core/tagger/types/entity_id_test.go b/comp/core/tagger/types/entity_id_test.go new file mode 100644 index 0000000000000..a11c4e5184d02 --- /dev/null +++ b/comp/core/tagger/types/entity_id_test.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package types defines types used by the Tagger component. +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDefaultEntityID_GetID(t *testing.T) { + tests := []struct { + name string + entityID EntityID + expectedID string + }{ + { + name: "invalid format, not containing `://`", + entityID: newDefaultEntityID("invalid_entity_id"), + expectedID: "", + }, + { + name: "invalid format, multiple `://`", + entityID: newDefaultEntityID("invalid://entity://id"), + expectedID: "entity://id", + }, + { + name: "conforming format, single `://`", + entityID: newDefaultEntityID("good://format"), + expectedID: "format", + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + assert.Equal(tt, test.expectedID, test.entityID.GetID()) + }) + } +} + +func TestDefaultEntityID_GetPrefix(t *testing.T) { + tests := []struct { + name string + entityID EntityID + expectedPrefix EntityIDPrefix + }{ + { + name: "invalid format, not containing `://`", + entityID: newDefaultEntityID("invalid_entity_id"), + expectedPrefix: "", + }, + { + 
name: "invalid format, multiple `://`", + entityID: newDefaultEntityID("invalid://entity://id"), + expectedPrefix: "invalid", + }, + { + name: "conforming format, single `://`", + entityID: newDefaultEntityID("good://format"), + expectedPrefix: "good", + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + assert.Equal(tt, test.expectedPrefix, test.entityID.GetPrefix()) + }) + } +} diff --git a/comp/core/tagger/types/types.go b/comp/core/tagger/types/types.go index cbe90462ce2b3..e9e16653cf9aa 100644 --- a/comp/core/tagger/types/types.go +++ b/comp/core/tagger/types/types.go @@ -14,9 +14,29 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/utils" ) +// ApplyFunc is a generic function applied to an object of type V +type ApplyFunc[V any] func(EntityID, V) + +// ObjectStore is a generic interface used as a key-value store in different tagstore implementations +// The key is of type EntityID +type ObjectStore[V any] interface { + // Get returns an object with the specified entity ID if it exists in the store + Get(EntityID) (V, bool) + // Set sets a given entityID to a given object in the store + Set(EntityID, V) + // Unset unsets a given entityID in the store + Unset(EntityID) + // Size returns the total number of objects in the store + Size() int + // ListObjects returns a slice containing all objects of the store + ListObjects() []V + // ForEach applies a given function to each object in the store + ForEach(ApplyFunc[V]) +} + // TaggerListResponse holds the tagger list response type TaggerListResponse struct { - Entities map[string]TaggerListEntity `json:"entities"` + Entities map[string]TaggerListEntity } // TaggerListEntity holds the tagging info about an entity @@ -28,7 +48,7 @@ type TaggerListEntity struct { // to be created from collectors and read by the store. 
type TagInfo struct { Source string // source collector's name - Entity string // entity name ready for lookup + EntityID EntityID // entity id for lookup HighCardTags []string // high cardinality tags that can create a lot of different timeseries (typically one per container, user request, etc.) OrchestratorCardTags []string // orchestrator cardinality tags that have as many combination as pods/tasks LowCardTags []string // low cardinality tags safe for every pipeline @@ -62,7 +82,7 @@ const ( // Entity is an entity ID + tags. type Entity struct { - ID string + ID EntityID HighCardinalityTags []string OrchestratorCardinalityTags []string LowCardinalityTags []string diff --git a/comp/core/workloadmeta/collectors/internal/docker/docker.go b/comp/core/workloadmeta/collectors/internal/docker/docker.go index cf4d5e68fb270..d65673750002a 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/docker.go +++ b/comp/core/workloadmeta/collectors/internal/docker/docker.go @@ -33,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -364,7 +365,7 @@ func extractImage(ctx context.Context, container types.ContainerJSON, resolve re ) if strings.Contains(imageSpec, "@sha256") { - name, registry, shortName, tag, err = containers.SplitImageName(imageSpec) + name, registry, shortName, tag, err = pkgcontainersimage.SplitImageName(imageSpec) if err != nil { log.Debugf("cannot split image name %q for container %q: %s", imageSpec, container.ID, err) } @@ -377,13 +378,13 @@ func extractImage(ctx context.Context, container types.ContainerJSON, resolve re return image } - name, registry, shortName, tag, err = 
containers.SplitImageName(resolvedImageSpec) + name, registry, shortName, tag, err = pkgcontainersimage.SplitImageName(resolvedImageSpec) if err != nil { log.Debugf("cannot split image name %q for container %q: %s", resolvedImageSpec, container.ID, err) // fallback and try to parse the original imageSpec anyway - if errors.Is(err, containers.ErrImageIsSha256) { - name, registry, shortName, tag, err = containers.SplitImageName(imageSpec) + if errors.Is(err, pkgcontainersimage.ErrImageIsSha256) { + name, registry, shortName, tag, err = pkgcontainersimage.SplitImageName(imageSpec) if err != nil { log.Debugf("cannot split image name %q for container %q: %s", imageSpec, container.ID, err) return image diff --git a/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go b/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go index c346809148780..f4dec30571972 100644 --- a/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go +++ b/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -219,7 +220,7 @@ func (c *collector) parsePodContainers( image, err := workloadmeta.NewContainerImage(imageID, container.Image) if err != nil { - if stdErrors.Is(err, containers.ErrImageIsSha256) { + if stdErrors.Is(err, pkgcontainersimage.ErrImageIsSha256) { // try the resolved image ID if the image name in the container // status is a SHA256. 
this seems to happen sometimes when // pinning the image to a SHA256 diff --git a/comp/core/workloadmeta/def/types.go b/comp/core/workloadmeta/def/types.go index cad44b46ffb28..ad596604c67fa 100644 --- a/comp/core/workloadmeta/def/types.go +++ b/comp/core/workloadmeta/def/types.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" - "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" ) // TODO(component): it might make more sense to move the store into its own @@ -277,7 +277,7 @@ func NewContainerImage(imageID string, imageName string) (ContainerImage, error) Name: imageName, } - name, registry, shortName, tag, err := containers.SplitImageName(imageName) + name, registry, shortName, tag, err := pkgcontainersimage.SplitImageName(imageName) if err != nil { return image, err } diff --git a/comp/dogstatsd/bundle.go b/comp/dogstatsd/bundle.go index fb9eb5bdb9518..39b078206409c 100644 --- a/comp/dogstatsd/bundle.go +++ b/comp/dogstatsd/bundle.go @@ -17,12 +17,12 @@ import ( // team: agent-metrics-logs // Bundle defines the fx options for this bundle. -func Bundle() fxutil.BundleOptions { +func Bundle(params server.Params) fxutil.BundleOptions { return fxutil.Bundle( serverdebugimpl.Module(), replayfx.Module(), pidmapimpl.Module(), - server.Module()) + server.Module(params)) } // ClientBundle defines the fx options for this bundle. 
diff --git a/comp/dogstatsd/listeners/uds_linux.go b/comp/dogstatsd/listeners/uds_linux.go index 71c45eb5de04c..9958790658c37 100644 --- a/comp/dogstatsd/listeners/uds_linux.go +++ b/comp/dogstatsd/listeners/uds_linux.go @@ -14,12 +14,12 @@ import ( "golang.org/x/sys/unix" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" "github.com/DataDog/datadog-agent/pkg/util/cache" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -137,5 +137,5 @@ func entityForPID(pid int32, capture bool, wmeta optional.Option[workloadmeta.Co return "", errNoContainerMatch } - return containers.BuildTaggerEntityName(cID), nil + return types.NewEntityID(types.ContainerID, cID).String(), nil } diff --git a/comp/dogstatsd/server/component.go b/comp/dogstatsd/server/component.go index 9ffb2ce924afa..b1c86c69521a6 100644 --- a/comp/dogstatsd/server/component.go +++ b/comp/dogstatsd/server/component.go @@ -39,9 +39,10 @@ type Mock interface { } // Module defines the fx options for this component. -func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newServer)) + fx.Provide(newServer), + fx.Supply(params)) } // MockModule defines the fx options for the mock component. 
diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go index c790f08c28130..e60036ae2a5fd 100644 --- a/comp/dogstatsd/server/server_test.go +++ b/comp/dogstatsd/server/server_test.go @@ -96,13 +96,12 @@ func fulfillDepsWithConfigOverride(t testing.TB, overrides map[string]interface{ fx.Replace(configComponent.MockParams{ Overrides: overrides, }), - fx.Supply(Params{Serverless: false}), replaymock.MockModule(), compressionimpl.MockModule(), pidmapimpl.Module(), demultiplexerimpl.FakeSamplerMockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), - Module(), + Module(Params{Serverless: false}), )) } @@ -113,13 +112,12 @@ func fulfillDepsWithConfigYaml(t testing.TB, yaml string) serverDeps { telemetryimpl.MockModule(), hostnameimpl.MockModule(), serverdebugimpl.MockModule(), - fx.Supply(Params{Serverless: false}), replaymock.MockModule(), compressionimpl.MockModule(), pidmapimpl.Module(), demultiplexerimpl.FakeSamplerMockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), - Module(), + Module(Params{Serverless: false}), )) } diff --git a/comp/forwarder/bundle.go b/comp/forwarder/bundle.go index 412d5e6860269..c7b7c8c64a934 100644 --- a/comp/forwarder/bundle.go +++ b/comp/forwarder/bundle.go @@ -18,9 +18,3 @@ func Bundle(params defaultforwarder.Params) fxutil.BundleOptions { return fxutil.Bundle( defaultforwarder.Module(params)) } - -// BundleWithProvider defines the fx options for this bundle with a provider. 
-func BundleWithProvider[T1 any, T2 any](provider func(T1, T2) defaultforwarder.Params) fxutil.BundleOptions { - return fxutil.Bundle( - defaultforwarder.ModuleWithProvider(provider)) -} diff --git a/comp/forwarder/defaultforwarder/component.go b/comp/forwarder/defaultforwarder/component.go index 81987ba6f9a6b..fbf381a2fbfb0 100644 --- a/comp/forwarder/defaultforwarder/component.go +++ b/comp/forwarder/defaultforwarder/component.go @@ -29,11 +29,12 @@ func Module(params Params) fxutil.Module { ) } -// ModuleWithProvider defines the fx options for this component. -func ModuleWithProvider[T1 any, T2 any](provider func(T1, T2) Params) fxutil.Module { +// ModulWithOptionTMP defines the fx options for this component with an option. +// This is a temporary function to until configsync is cleanup. +func ModulWithOptionTMP(option fx.Option) fxutil.Module { return fxutil.Component( fx.Provide(newForwarder), - fx.Provide(provider), + option, ) } diff --git a/comp/forwarder/defaultforwarder/default_forwarder.go b/comp/forwarder/defaultforwarder/default_forwarder.go index 426a86a995c5b..fee31b333d31d 100644 --- a/comp/forwarder/defaultforwarder/default_forwarder.go +++ b/comp/forwarder/defaultforwarder/default_forwarder.go @@ -191,6 +191,13 @@ func (o *Options) setRetryQueuePayloadsTotalMaxSizeFromQueueMax(v int) { o.RetryQueuePayloadsTotalMaxSize = v * maxPayloadSize } +// SetEnabledFeatures sets the features enabled +func (o *Options) SetEnabledFeatures(features []Features) { + for _, feature := range features { + o.EnabledFeatures = SetFeature(o.EnabledFeatures, feature) + } +} + // DefaultForwarder is the default implementation of the Forwarder. 
type DefaultForwarder struct { config config.Component diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go index 1d0041e8f9126..9769bbaeb2ab0 100644 --- a/comp/forwarder/defaultforwarder/forwarder.go +++ b/comp/forwarder/defaultforwarder/forwarder.go @@ -13,6 +13,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" + "github.com/DataDog/datadog-agent/pkg/config/utils" ) type dependencies struct { @@ -31,19 +33,46 @@ type provides struct { } func newForwarder(dep dependencies) provides { - return NewForwarder(dep.Config, dep.Log, dep.Lc, true, dep.Params) + options := createOptions(dep.Params, dep.Config, dep.Log) + return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options, dep.Params.useNoopForwarder) +} + +func createOptions(params Params, config config.Component, log log.Component) *Options { + var options *Options + if !params.withResolver { + options = NewOptions(config, log, getMultipleEndpoints(config, log)) + } else { + keysPerDomain := getMultipleEndpoints(config, log) + options = NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(keysPerDomain)) + } + // Override the DisableAPIKeyChecking only if WithFeatures was called + if disableAPIKeyChecking, ok := params.disableAPIKeyCheckingOverride.Get(); ok { + options.DisableAPIKeyChecking = disableAPIKeyChecking + } + options.SetEnabledFeatures(params.features) + + return options +} + +func getMultipleEndpoints(config config.Component, log log.Component) map[string][]string { + // Inject the config to make sure we can call GetMultipleEndpoints. 
+ keysPerDomain, err := utils.GetMultipleEndpoints(config) + if err != nil { + log.Error("Misconfiguration of agent endpoints: ", err) + } + return keysPerDomain } // NewForwarder returns a new forwarder component. // //nolint:revive -func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, params Params) provides { - if params.UseNoopForwarder { +func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options, useNoopForwarder bool) provides { + if useNoopForwarder { return provides{ Comp: NoopForwarder{}, } } - forwarder := NewDefaultForwarder(config, log, params.Options) + forwarder := NewDefaultForwarder(config, log, options) lc.Append(fx.Hook{ OnStart: func(context.Context) error { diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod index c19d13216e14b..38c5179b0f7e9 100644 --- a/comp/forwarder/defaultforwarder/go.mod +++ b/comp/forwarder/defaultforwarder/go.mod @@ -65,6 +65,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/golang/protobuf v1.5.3 @@ -87,7 +88,6 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect diff --git a/comp/forwarder/defaultforwarder/params.go b/comp/forwarder/defaultforwarder/params.go index 754ae29962a98..1914e068b06ec 100644 --- a/comp/forwarder/defaultforwarder/params.go +++ b/comp/forwarder/defaultforwarder/params.go @@ -6,36 +6,54 @@ package defaultforwarder import ( - "github.com/DataDog/datadog-agent/comp/core/config" - log "github.com/DataDog/datadog-agent/comp/core/log/def" - "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" - "github.com/DataDog/datadog-agent/pkg/config/utils" + "github.com/DataDog/datadog-agent/pkg/util/optional" ) // Params contains the parameters to create a forwarder. type Params struct { - UseNoopForwarder bool - // TODO: (components) When the code of the forwarder will be - // in /comp/forwarder move the content of forwarder.Options inside this struct. - Options *Options + useNoopForwarder bool + withResolver bool + + // Use optional to override Options.DisableAPIKeyChecking only if WithFeatures was called + disableAPIKeyCheckingOverride optional.Option[bool] + features []Features } +type option = func(*Params) + // NewParams initializes a new Params struct -func NewParams(config config.Component, log log.Component) Params { - return Params{Options: NewOptions(config, log, getMultipleEndpoints(config, log))} +func NewParams(options ...option) Params { + p := Params{} + for _, option := range options { + option(&p) + } + return p +} + +// WithResolvers enables the forwarder to use resolvers +func WithResolvers() option { + return func(p *Params) { + p.withResolver = true + } } -// NewParamsWithResolvers initializes a new Params struct with resolvers -func NewParamsWithResolvers(config config.Component, log log.Component) Params { - keysPerDomain := getMultipleEndpoints(config, log) - return Params{Options: NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(keysPerDomain))} +// 
WithDisableAPIKeyChecking disables the API key checking +func WithDisableAPIKeyChecking() option { + return func(p *Params) { + p.disableAPIKeyCheckingOverride.Set(true) + } +} + +// WithFeatures sets the given features on the forwarder +func WithFeatures(features ...Features) option { + return func(p *Params) { + p.features = features + } } -func getMultipleEndpoints(config config.Component, log log.Component) map[string][]string { - // Inject the config to make sure we can call GetMultipleEndpoints. - keysPerDomain, err := utils.GetMultipleEndpoints(config) - if err != nil { - log.Error("Misconfiguration of agent endpoints: ", err) +// WithNoopForwarder sets the forwarder to use the noop forwarder +func WithNoopForwarder() option { + return func(p *Params) { + p.useNoopForwarder = true } - return keysPerDomain } diff --git a/comp/logs/agent/agentimpl/agent.go b/comp/logs/agent/agentimpl/agent.go index 396dd20a51785..b5626e3d9b54b 100644 --- a/comp/logs/agent/agentimpl/agent.go +++ b/comp/logs/agent/agentimpl/agent.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname" log "github.com/DataDog/datadog-agent/comp/core/log/def" statusComponent "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/logs/agent/config" @@ -80,6 +81,7 @@ type dependencies struct { Hostname hostname.Component WMeta optional.Option[workloadmeta.Component] SchedulerProviders []schedulers.Scheduler `group:"log-agent-scheduler"` + Tagger tagger.Component } type provides struct { @@ -100,6 +102,7 @@ type logAgent struct { config pkgConfig.Reader inventoryAgent inventoryagent.Component hostname hostname.Component + tagger tagger.Component sources *sources.LogSources services *service.Services @@ -146,6 +149,7 @@ func newLogsAgent(deps dependencies) provides {
wmeta: deps.WMeta, schedulerProviders: deps.SchedulerProviders, integrationsLogs: integrationsLogs, + tagger: deps.Tagger, } deps.Lc.Append(fx.Hook{ OnStart: logsAgent.start, diff --git a/comp/logs/agent/agentimpl/agent_core_init.go b/comp/logs/agent/agentimpl/agent_core_init.go index 40d5fa20ce3f1..903fbbce2da63 100644 --- a/comp/logs/agent/agentimpl/agent_core_init.go +++ b/comp/logs/agent/agentimpl/agent_core_init.go @@ -48,16 +48,23 @@ func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta // setup the launchers lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, auditor, a.tracker) + + fileLimits := a.config.GetInt("logs_config.open_files_limit") + fileValidatePodContainer := a.config.GetBool("logs_config.validate_pod_container_id") + fileScanPeriod := time.Duration(a.config.GetFloat64("logs_config.file_scan_period") * float64(time.Second)) + fileWildcardSelectionMode := a.config.GetString("logs_config.file_wildcard_selection_mode") lnchrs.AddLauncher(filelauncher.NewLauncher( - a.config.GetInt("logs_config.open_files_limit"), + fileLimits, filelauncher.DefaultSleepDuration, - a.config.GetBool("logs_config.validate_pod_container_id"), - time.Duration(a.config.GetFloat64("logs_config.file_scan_period")*float64(time.Second)), - a.config.GetString("logs_config.file_wildcard_selection_mode"), a.flarecontroller)) + fileValidatePodContainer, + fileScanPeriod, + fileWildcardSelectionMode, + a.flarecontroller, + a.tagger)) lnchrs.AddLauncher(listener.NewLauncher(a.config.GetInt("logs_config.frame_size"))) lnchrs.AddLauncher(journald.NewLauncher(a.flarecontroller)) lnchrs.AddLauncher(windowsevent.NewLauncher()) - lnchrs.AddLauncher(container.NewLauncher(a.sources, wmeta)) + lnchrs.AddLauncher(container.NewLauncher(a.sources, wmeta, a.tagger)) lnchrs.AddLauncher(integrationLauncher.NewLauncher( a.sources, integrationsLogs)) diff --git a/comp/logs/agent/agentimpl/agent_serverless_init.go 
b/comp/logs/agent/agentimpl/agent_serverless_init.go index d5276a6db9567..aff03ee85d561 100644 --- a/comp/logs/agent/agentimpl/agent_serverless_init.go +++ b/comp/logs/agent/agentimpl/agent_serverless_init.go @@ -53,13 +53,19 @@ func (a *logAgent) SetupPipeline( lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, a.auditor, a.tracker) lnchrs.AddLauncher(channel.NewLauncher()) + + fileLimits := a.config.GetInt("logs_config.open_files_limit") + fileValidatePodContainer := a.config.GetBool("logs_config.validate_pod_container_id") + fileScanPeriod := time.Duration(a.config.GetFloat64("logs_config.file_scan_period") * float64(time.Second)) + fileWildcardSelectionMode := a.config.GetString("logs_config.file_wildcard_selection_mode") lnchrs.AddLauncher(filelauncher.NewLauncher( - a.config.GetInt("logs_config.open_files_limit"), + fileLimits, filelauncher.DefaultSleepDuration, - a.config.GetBool("logs_config.validate_pod_container_id"), - time.Duration(a.config.GetFloat64("logs_config.file_scan_period")*float64(time.Second)), - a.config.GetString("logs_config.file_wildcard_selection_mode"), a.flarecontroller)) - + fileValidatePodContainer, + fileScanPeriod, + fileWildcardSelectionMode, + a.flarecontroller, + a.tagger)) a.schedulers = schedulers.NewSchedulers(a.sources, a.services) a.destinationsCtx = destinationsCtx a.pipelineProvider = pipelineProvider diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go index a8f7e0b327ef9..d130981ae7920 100644 --- a/comp/logs/agent/agentimpl/agent_test.go +++ b/comp/logs/agent/agentimpl/agent_test.go @@ -26,6 +26,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/logs/agent/config" @@ -56,6 +58,7 @@ type AgentTestSuite struct { source *sources.LogSource configOverrides map[string]interface{} + tagger tagger.Component } type testDeps struct { @@ -91,6 +94,9 @@ func (suite *AgentTestSuite) SetupTest() { suite.configOverrides["logs_config.run_path"] = suite.testDir // Shorter grace period for tests. suite.configOverrides["logs_config.stop_grace_period"] = 1 + + fakeTagger := taggerimpl.SetupFakeTagger(suite.T()) + suite.tagger = fakeTagger } func (suite *AgentTestSuite) TearDownTest() { @@ -100,6 +106,7 @@ func (suite *AgentTestSuite) TearDownTest() { metrics.LogsSent.Set(0) metrics.DestinationErrors.Set(0) metrics.DestinationLogsDropped.Init() + suite.tagger.(tagger.Mock).ResetTagger() } func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, *sources.LogSources, *service.Services) { @@ -119,6 +126,9 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, inventoryagentimpl.MockModule(), )) + fakeTagger := taggerimpl.SetupFakeTagger(suite.T()) + defer fakeTagger.ResetTagger() + agent := &logAgent{ log: deps.Log, config: deps.Config, @@ -130,6 +140,7 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, services: services, tracker: tailers.NewTailerTracker(), endpoints: endpoints, + tagger: fakeTagger, } agent.setupAgent() @@ -396,6 +407,9 @@ func (suite *AgentTestSuite) createDeps() dependencies { fx.Replace(configComponent.MockParams{Overrides: suite.configOverrides}), inventoryagentimpl.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), + fx.Provide(func() tagger.Component { + return suite.tagger + }), )) } diff --git a/comp/logs/agent/agentimpl/serverless.go b/comp/logs/agent/agentimpl/serverless.go index 66ec9b23ef7ee..22e06ae5a8beb 
100644 --- a/comp/logs/agent/agentimpl/serverless.go +++ b/comp/logs/agent/agentimpl/serverless.go @@ -7,19 +7,21 @@ package agentimpl import ( "context" - flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" + + "go.uber.org/atomic" logComponent "github.com/DataDog/datadog-agent/comp/core/log/impl" + "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/logs/agent" + flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" pkgConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/logs/service" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" - "go.uber.org/atomic" ) // NewServerlessLogsAgent creates a new instance of the logs agent for serverless -func NewServerlessLogsAgent() agent.ServerlessLogsAgent { +func NewServerlessLogsAgent(tagger tagger.Component) agent.ServerlessLogsAgent { logsAgent := &logAgent{ log: logComponent.NewTemporaryLoggerWithoutInit(), config: pkgConfig.Datadog(), @@ -29,6 +31,7 @@ func NewServerlessLogsAgent() agent.ServerlessLogsAgent { services: service.NewServices(), tracker: tailers.NewTailerTracker(), flarecontroller: flareController.NewFlareController(), + tagger: tagger, } return logsAgent } diff --git a/comp/logs/agent/config/integration_config.go b/comp/logs/agent/config/integration_config.go index 896fa36e49ef7..ff6f96ecfafb6 100644 --- a/comp/logs/agent/config/integration_config.go +++ b/comp/logs/agent/config/integration_config.go @@ -289,6 +289,21 @@ func (c *LogsConfig) AutoMultiLineEnabled(coreConfig pkgconfigmodel.Reader) bool return coreConfig.GetBool("logs_config.auto_multi_line_detection") } +// ExperimentalAutoMultiLineEnabled determines whether experimental auto multi line detection is enabled for this config. +// NOTE - this setting is subject to change as the feature is still experimental and being tested. 
+// If logs_config.experimental_auto_multi_line_detection is enabled, but the log source has AutoMultiLine explicitly set to false, +// disable the feature. +func (c *LogsConfig) ExperimentalAutoMultiLineEnabled(coreConfig pkgconfigmodel.Reader) bool { + if !coreConfig.GetBool("logs_config.experimental_auto_multi_line_detection") { + return false + } + + if c.AutoMultiLine != nil && !*c.AutoMultiLine { + return false + } + return true +} + // ShouldProcessRawMessage returns if the raw message should be processed instead // of only the message content. // This is tightly linked to how messages are transmitted through the pipeline. diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go index aa3ca8c2baff3..8f0dd83c49cec 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go @@ -19,6 +19,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" logsBundle "github.com/DataDog/datadog-agent/comp/logs" @@ -150,8 +152,18 @@ func TestGetPayload(t *testing.T) { // Register an error src.Status.Error(fmt.Errorf("No such file or directory")) logSources.AddSource(src) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + mockLogAgent := fxutil.Test[optional.Option[logagent.Mock]]( - t, logsBundle.MockBundle(), core.MockBundle(), inventoryagentimpl.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), + t,
+ logsBundle.MockBundle(),
+ core.MockBundle(), + inventoryagentimpl.MockModule(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + fx.Provide(func() tagger.Component { + return fakeTagger + }), ) logsAgent, _ := mockLogAgent.Get() logsAgent.SetSources(logSources) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go index 193f41cd33824..bd3ef1584890a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go @@ -41,9 +41,9 @@ func (ialp *infraAttributesLogProcessor) processLogs(_ context.Context, ld plog. // Get all unique tags from resource attributes and global tags for _, entityID := range entityIDs { - entityTags, err := ialp.tagger.Tag(entityID, ialp.cardinality) + entityTags, err := ialp.tagger.Tag(entityID.String(), ialp.cardinality) if err != nil { - ialp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID), zap.Error(err)) + ialp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err)) continue } for _, tag := range entityTags { diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go index d31e2d1dfe69d..054c6667d1fc1 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,7 +127,7 @@ func TestInfraAttributesLogProcessor(t *testing.T) { tc := 
newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[collectors.GlobalEntityID] = []string{"global:tag"} + tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} factory := NewFactory(tc) flp, err := factory.CreateLogsProcessor( diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go index de83ea21b26a1..5dcbd2e1f974c 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go @@ -39,38 +39,39 @@ func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagg // TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method // entityIDsFromAttributes gets the entity IDs from resource attributes. // If not found, an empty string slice is returned. -func entityIDsFromAttributes(attrs pcommon.Map) []string { - entityIDs := make([]string, 0, 8) +func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { + entityIDs := make([]types.EntityID, 0, 8) // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. 
if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { - entityIDs = append(entityIDs, fmt.Sprintf("container_id://%v", containerID.AsString())) + entityIDs = append(entityIDs, types.NewEntityID(types.ContainerID, containerID.AsString())) } if containerImageID, ok := attrs.Get(conventions.AttributeContainerImageID); ok { splitImageID := strings.SplitN(containerImageID.AsString(), "@sha256:", 2) if len(splitImageID) == 2 { - entityIDs = append(entityIDs, fmt.Sprintf("container_image_metadata://sha256:%v", splitImageID[1])) + entityIDs = append(entityIDs, types.NewEntityID(types.ContainerImageMetadata, fmt.Sprintf("sha256:%v", splitImageID[1]))) } } if ecsTaskArn, ok := attrs.Get(conventions.AttributeAWSECSTaskARN); ok { - entityIDs = append(entityIDs, fmt.Sprintf("ecs_task://%v", ecsTaskArn.AsString())) + entityIDs = append(entityIDs, types.NewEntityID(types.ECSTask, ecsTaskArn.AsString())) } if deploymentName, ok := attrs.Get(conventions.AttributeK8SDeploymentName); ok { namespace, namespaceOk := attrs.Get(conventions.AttributeK8SNamespaceName) if namespaceOk { - entityIDs = append(entityIDs, fmt.Sprintf("deployment://%v/%v", namespace.AsString(), deploymentName.AsString())) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesDeployment, fmt.Sprintf("%s/%s", namespace.AsString(), deploymentName.AsString()))) } } if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok { - entityIDs = append(entityIDs, fmt.Sprintf("kubernetes_metadata://%s", string(util.GenerateKubeMetadataEntityID("", "namespaces", "", namespace.AsString())))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", namespace.AsString())))) } + if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok { - entityIDs = append(entityIDs, fmt.Sprintf("kubernetes_metadata://%s", string(util.GenerateKubeMetadataEntityID("", "nodes", "", nodeName.AsString())))) + 
entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "nodes", "", nodeName.AsString())))) } if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok { - entityIDs = append(entityIDs, fmt.Sprintf("kubernetes_pod_uid://%v", podUID.AsString())) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString())) } if processPid, ok := attrs.Get(conventions.AttributeProcessPID); ok { - entityIDs = append(entityIDs, fmt.Sprintf("process://%v", processPid.AsString())) + entityIDs = append(entityIDs, types.NewEntityID(types.Process, processPid.AsString())) } return entityIDs } @@ -92,9 +93,9 @@ func (iamp *infraAttributesMetricProcessor) processMetrics(_ context.Context, md // Get all unique tags from resource attributes and global tags for _, entityID := range entityIDs { - entityTags, err := iamp.tagger.Tag(entityID, iamp.cardinality) + entityTags, err := iamp.tagger.Tag(entityID.String(), iamp.cardinality) if err != nil { - iamp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID), zap.Error(err)) + iamp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err)) continue } for _, tag := range entityTags { diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go index c852523df720e..023f60b904edc 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/collector/processor/processortest" conventions "go.opentelemetry.io/collector/semconv/v1.21.0" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" 
"github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -130,7 +130,7 @@ func TestInfraAttributesMetricProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[collectors.GlobalEntityID] = []string{"global:tag"} + tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} factory := NewFactory(tc) fmp, err := factory.CreateMetricsProcessor( context.Background(), @@ -266,7 +266,11 @@ func TestEntityIDsFromAttributes(t *testing.T) { for _, testInstance := range tests { t.Run(testInstance.name, func(t *testing.T) { entityIDs := entityIDsFromAttributes(testInstance.attrs) - assert.Equal(t, testInstance.entityIDs, entityIDs) + entityIDsAsStrings := make([]string, len(entityIDs)) + for idx, entityID := range entityIDs { + entityIDsAsStrings[idx] = entityID.String() + } + assert.Equal(t, testInstance.entityIDs, entityIDsAsStrings) }) } } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go index 465162f172df1..41fc7ee5ca3b8 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go @@ -6,7 +6,7 @@ package infraattributesprocessor import ( - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -29,5 +29,5 @@ func (t *testTaggerClient) Tag(entityID string, _ types.TagCardinality) ([]strin // GlobalTags mocks taggerimpl.GlobalTags functionality for purpose of testing, removing dependency on Taggerimpl func (t *testTaggerClient) GlobalTags(_ types.TagCardinality) ([]string, error) { - return 
t.tagMap[collectors.GlobalEntityID], nil + return t.tagMap[common.GetGlobalEntityID().String()], nil } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go index 98950b8817ae2..d35f7b4009208 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go @@ -40,9 +40,9 @@ func (iasp *infraAttributesSpanProcessor) processTraces(_ context.Context, td pt // Get all unique tags from resource attributes and global tags for _, entityID := range entityIDs { - entityTags, err := iasp.tagger.Tag(entityID, iasp.cardinality) + entityTags, err := iasp.tagger.Tag(entityID.String(), iasp.cardinality) if err != nil { - iasp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID), zap.Error(err)) + iasp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err)) continue } for _, tag := range entityTags { diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go index 92d23fdb191fc..53cf8ad88f134 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors" + "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,7 +127,7 @@ func TestInfraAttributesTraceProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = 
[]string{"deployment:name"} - tc.tagMap[collectors.GlobalEntityID] = []string{"global:tag"} + tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} factory := NewFactory(tc) fmp, err := factory.CreateTracesProcessor( context.Background(), diff --git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go index 608c8a83b9754..e073c29ba1f3c 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test.go +++ b/comp/otelcol/otlp/integrationtest/integration_test.go @@ -80,7 +80,7 @@ import ( func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) error { return fxutil.Run( - forwarder.BundleWithProvider(defaultforwarder.NewParams), + forwarder.Bundle(defaultforwarder.NewParams()), logtrace.Module(), inventoryagentimpl.Module(), workloadmetafx.Module(workloadmeta.NewParams()), diff --git a/comp/process/forwarders/forwardersimpl/forwarders.go b/comp/process/forwarders/forwardersimpl/forwarders.go index 38291418d9b56..2e81a8b290d8f 100644 --- a/comp/process/forwarders/forwardersimpl/forwarders.go +++ b/comp/process/forwarders/forwardersimpl/forwarders.go @@ -72,15 +72,15 @@ func newForwarders(deps dependencies) (forwarders.Component, error) { }, nil } -func createForwarder(deps dependencies, params defaultforwarder.Params) defaultforwarder.Component { - return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, params).Comp +func createForwarder(deps dependencies, options *defaultforwarder.Options) defaultforwarder.Component { + return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options, false).Comp } -func createParams(config config.Component, log log.Component, queueBytes int, endpoints []apicfg.Endpoint) defaultforwarder.Params { +func createParams(config config.Component, log log.Component, queueBytes int, endpoints []apicfg.Endpoint) *defaultforwarder.Options { forwarderOpts := 
defaultforwarder.NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(apicfg.KeysPerDomains(endpoints))) forwarderOpts.DisableAPIKeyChecking = true forwarderOpts.RetryQueuePayloadsTotalMaxSize = queueBytes // Allow more in-flight requests than the default - return defaultforwarder.Params{Options: forwarderOpts} + return forwarderOpts } func (f *forwardersComp) GetEventForwarder() defaultforwarder.Component { diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index e5420f60aefbc..86f192e10fdfb 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -122,7 +122,7 @@ func prepareConfig(c corecompcfg.Component) (*config.AgentConfig, error) { } func containerTagsFunc(cid string) ([]string, error) { - return tagger.Tag("container_id://"+cid, types.HighCardinality) + return tagger.Tag(types.NewEntityID(types.ContainerID, cid).String(), types.HighCardinality) } // appendEndpoints appends any endpoint configuration found at the given cfgKey. 
diff --git a/go.mod b/go.mod index 0d1452bea1a66..ded7d7e482a48 100644 --- a/go.mod +++ b/go.mod @@ -110,6 +110,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/cache => ./pkg/util/cache github.com/DataDog/datadog-agent/pkg/util/cgroups => ./pkg/util/cgroups github.com/DataDog/datadog-agent/pkg/util/common => ./pkg/util/common + github.com/DataDog/datadog-agent/pkg/util/containers/image => ./pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/executable => ./pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ./pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/flavor => ./pkg/util/flavor @@ -162,8 +163,8 @@ require ( github.com/DataDog/watermarkpodautoscaler v0.6.1 github.com/DataDog/zstd v1.5.5 github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect - github.com/Masterminds/semver/v3 v3.2.1 - github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/semver/v3 v3.3.0 + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 github.com/Microsoft/hcsshim v0.12.5 github.com/acobaugh/osrelease v0.1.0 @@ -182,7 +183,7 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/cilium/ebpf v0.16.0 github.com/clbanning/mxj v1.8.4 - github.com/containerd/containerd v1.7.20 + github.com/containerd/containerd v1.7.21 github.com/containernetworking/cni v1.2.3 github.com/coreos/go-semver v0.3.1 github.com/coreos/go-systemd v22.5.0+incompatible @@ -238,7 +239,7 @@ require ( github.com/netsampler/goflow2 v1.3.3 github.com/olekukonko/tablewriter v0.0.5 github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 - github.com/open-policy-agent/opa v0.67.1 + github.com/open-policy-agent/opa v0.68.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 // indirect github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 @@ -247,7 +248,7 @@ require ( 
github.com/pahanini/go-grpc-bidirectional-streaming-example v0.0.0-20211027164128-cc6111af44be github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/client_model v0.6.1 github.com/prometheus/procfs v0.15.1 github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect @@ -260,7 +261,7 @@ require ( github.com/skydive-project/go-debouncer v1.0.0 github.com/smira/go-xz v0.1.0 github.com/spf13/afero v1.11.0 - github.com/spf13/cast v1.6.0 + github.com/spf13/cast v1.7.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/streadway/amqp v1.1.0 @@ -308,7 +309,7 @@ require ( golang.org/x/tools v0.24.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.66.0 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.34.2 gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 @@ -437,7 +438,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/huandu/xstrings v1.4.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -500,7 +501,7 @@ require ( github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/prometheus/common v0.54.0 + github.com/prometheus/common v0.55.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -511,7 +512,7 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/smira/go-ftp-protocol v0.0.0-20140829150050-066b75c2b70d // indirect github.com/spdx/tools-golang v0.5.4-0.20231108154018-0c0f394b5e1a // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -678,6 +679,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2 github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3 @@ -719,7 +721,7 @@ require ( github.com/kr/pretty v0.3.1 // todo: update datadog connector with breaking changes from https://github.com/DataDog/datadog-agent/pull/26347. 
github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 - github.com/planetscale/vtprotobuf v0.6.0 + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 github.com/prometheus-community/pro-bing v0.4.1 github.com/rickar/props v1.0.0 github.com/sijms/go-ora/v2 v2.8.19 @@ -748,7 +750,7 @@ require ( code.cloudfoundry.org/go-diodes v0.0.0-20240604201846-c756bfed2ed3 // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 // indirect - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect @@ -808,7 +810,7 @@ require ( github.com/elastic/go-licenser v0.4.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/expr-lang/expr v1.16.9 // indirect @@ -869,7 +871,8 @@ require ( github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/nxadm/tail v1.4.11 // indirect diff --git a/go.sum b/go.sum index 02cb1115b1299..6ade4f6e537a8 100644 --- a/go.sum +++ b/go.sum @@ -635,8 +635,8 @@ 
code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 h1:mrZQaZmuDIPh code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3 h1:2Qal+q+tw/DmDOoJBWwDCPE3lIJNj/1o7oMkkb2c5SI= code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3/go.mod h1:eTbFJpyXRGuFVyg5+oaj9B2eIbIc+0/kZjH8ftbtdew= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -775,11 +775,10 @@ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy86 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod 
h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -1088,8 +1087,8 @@ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+Bu github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ= -github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0= +github.com/containerd/containerd v1.7.21 h1:USGXRK1eOC/SX0L195YgxTHb0a00anxajOzgfN0qrCA= +github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g= github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA= github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= @@ -1220,8 +1219,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 h1:IgJPqnrlY2Mr4pYB6oaMKvFvwJ9H+X6CCY5x1vCTcpc= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1549,7 +1548,6 @@ github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1720,9 +1718,8 @@ github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSo github.com/hjson/hjson-go/v4 v4.0.0/go.mod 
h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= @@ -1737,7 +1734,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1: github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90 h1:xrtfZokN++5kencK33hn2Kx3Uj8tGnjMEhdt6FMvHD0= github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90/go.mod h1:LEzdaZarZ5aqROlLIwJ4P7h3+4o71008fSy6wpaEB+s= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= @@ -2009,8 +2005,10 @@ github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.7.0 
h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -2100,8 +2098,8 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/open-policy-agent/opa v0.67.1 h1:rzy26J6g1X+CKknAcx0Vfbt41KqjuSzx4E0A8DAZf3E= -github.com/open-policy-agent/opa v0.67.1/go.mod h1:aqKlHc8E2VAAylYE9x09zJYr/fYzGX+JKne89UGqFzk= +github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= +github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 h1:Kpfqjwp+nlgqacXkSS8T8iGiTMTFo8NoT8AoRomDOpU= github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0/go.mod h1:ymbGC/jEXTq8mgHsxzV1PjVGHmV5hSQXmkYkFfGfuLw= 
github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= @@ -2278,8 +2276,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA= -github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -2388,9 +2386,9 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= 
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/signalfx/sapm-proto v0.14.0 h1:KWh3I5E4EkelB19aP1/54Ik8khSioC/RVRW/riOfRGg= github.com/signalfx/sapm-proto v0.14.0/go.mod h1:Km6PskZh966cqNoUn3AmRyGRix5VfwnxVBvn2vjRC9U= @@ -2872,7 +2870,6 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= @@ -3658,8 +3655,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a 
h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index b6df0ab46e821..2d545f58b498b 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -52,6 +52,9 @@ # load isn't supported by windows delete "#{conf_dir}/load.d" + # service_discovery isn't supported by windows + delete "#{conf_dir}/service_discovery.d" + # Remove .pyc files from embedded Python command "del /q /s #{windows_safe_path(install_dir)}\\*.pyc" end diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 18cf25972173e..ea8c9642e824d 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -175,7 +175,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(tagger.NewTaggerParamsForCoreAgent), taggerimpl.Module(), autodiscoveryimpl.Module(), - forwarder.Bundle(defaultforwarder.Params{UseNoopForwarder: true}), + forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithNoopForwarder())), inventorychecksimpl.Module(), // inventorychecksimpl depends on a collector and serializer when created to send payload. // Here we just want to collect metadata to be displayed, so we don't need a collector. 
diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go index efdcf52d7de17..87b537b208324 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go @@ -17,6 +17,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" apicommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/pointer" @@ -180,6 +181,9 @@ func TestInjectAgentSidecar(t *testing.T) { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-name", + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, }, Spec: corev1.PodSpec{ ShareProcessNamespace: pointer.Ptr(true), @@ -297,6 +301,9 @@ func TestInjectAgentSidecar(t *testing.T) { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-name", + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, }, Spec: corev1.PodSpec{ ShareProcessNamespace: pointer.Ptr(true), diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go index c4a2efea3341c..8e8d424e17eef 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go @@ -92,6 +92,10 @@ func applyFargateOverrides(pod *corev1.Pod) (bool, error) { volume, volumeMount := socketsVolume() injected := common.InjectVolume(pod, volume, volumeMount) + if injected { + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, volume.Name) + } + mutated = mutated || injected // ShareProcessNamespace 
is required for the process collection feature diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go index a277a52f6d888..ee8ceb1a544f5 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go @@ -14,7 +14,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -99,6 +101,11 @@ func TestApplyProviderOverrides(t *testing.T) { }, }, expectedPodAfterOverride: &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, + }, Spec: corev1.PodSpec{ ShareProcessNamespace: pointer.Ptr(true), Containers: []corev1.Container{ @@ -203,6 +210,11 @@ func TestApplyProviderOverrides(t *testing.T) { }, }, expectedPodAfterOverride: &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, + }, Spec: corev1.PodSpec{ ShareProcessNamespace: pointer.Ptr(true), Containers: []corev1.Container{ diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index 1722cb5cb0832..d7b217f755020 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -262,6 +262,10 @@ func TestInjectAutoInstruConfigV2(t *testing.T) { require.Equal(t, etcVolume.Name, 
tt.pod.Spec.Volumes[1].Name, "expected datadog-etc volume to be injected") + volumesMarkedAsSafeToEvict := strings.Split(tt.pod.Annotations[common.K8sAutoscalerSafeToEvictVolumesAnnotation], ",") + require.Contains(t, volumesMarkedAsSafeToEvict, volumeName, "expected volume %s to be marked as safe to evict", volumeName) + require.Contains(t, volumesMarkedAsSafeToEvict, etcVolume.Name, "expected volume %s to be marked as safe to evict", etcVolume.Name) + require.Equal(t, len(tt.libInfo.libs)+1, len(tt.pod.Spec.InitContainers), "expected there to be one more than the number of libs to inject for init containers") diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go index d113de8cf95e1..ecd3119fd6b0b 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go @@ -101,6 +101,8 @@ func (v volume) mount(mount corev1.VolumeMount) volumeMount { // mutatePod implements podMutator for volume. 
func (v volume) mutatePod(pod *corev1.Pod) error { + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, v.Name) + vol := v.Volume for idx, i := range pod.Spec.Volumes { if i.Name == v.Volume.Name { diff --git a/pkg/clusteragent/admission/mutate/common/common.go b/pkg/clusteragent/admission/mutate/common/common.go index 0a08038db49fc..ba60a05f472b8 100644 --- a/pkg/clusteragent/admission/mutate/common/common.go +++ b/pkg/clusteragent/admission/mutate/common/common.go @@ -11,7 +11,9 @@ package common import ( "encoding/json" "fmt" + "slices" "strconv" + "strings" "github.com/wI2L/jsondiff" corev1 "k8s.io/api/core/v1" @@ -22,6 +24,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) +// K8sAutoscalerSafeToEvictVolumesAnnotation is the annotation used by the +// Kubernetes cluster-autoscaler to mark a volume as safe to evict +const K8sAutoscalerSafeToEvictVolumesAnnotation = "cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes" + // MutationFunc is a function that mutates a pod type MutationFunc func(pod *corev1.Pod, ns string, cl dynamic.Interface) (bool, error) @@ -181,3 +187,29 @@ func ContainerRegistry(specificConfigOpt string) string { return config.Datadog().GetString("admission_controller.container_registry") } + +// MarkVolumeAsSafeToEvictForAutoscaler adds the Kubernetes cluster-autoscaler +// annotation to the given pod, marking the specified local volume as safe to +// evict. This annotation allows the cluster-autoscaler to evict pods with the +// local volume mounted, enabling the node to scale down if necessary. +// This function will not add the volume to the annotation if it is already +// there. 
+// Ref: https://github.com/kubernetes/autoscaler/blob/cluster-autoscaler-release-1.31/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node +func MarkVolumeAsSafeToEvictForAutoscaler(pod *corev1.Pod, volumeNameToAdd string) { + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + + currentVolumes := pod.Annotations[K8sAutoscalerSafeToEvictVolumesAnnotation] + var volumeList []string + if currentVolumes != "" { + volumeList = strings.Split(currentVolumes, ",") + } + + if slices.Contains(volumeList, volumeNameToAdd) { + return // Volume already in the list, no need to add + } + + volumeList = append(volumeList, volumeNameToAdd) + pod.Annotations[K8sAutoscalerSafeToEvictVolumesAnnotation] = strings.Join(volumeList, ",") +} diff --git a/pkg/clusteragent/admission/mutate/common/common_test.go b/pkg/clusteragent/admission/mutate/common/common_test.go index 1a5606f33f886..6423a17759e9d 100644 --- a/pkg/clusteragent/admission/mutate/common/common_test.go +++ b/pkg/clusteragent/admission/mutate/common/common_test.go @@ -218,3 +218,50 @@ func Test_injectVolume(t *testing.T) { }) } } + +func TestMarkVolumeAsSafeToEvictForAutoscaler(t *testing.T) { + tests := []struct { + name string + currentSafeToEvictAnnotationValue string + volumeToAdd string + expectedNewSafeToEvictAnnotationValue string + }{ + { + name: "the annotation is not set", + currentSafeToEvictAnnotationValue: "", + volumeToAdd: "datadog", + expectedNewSafeToEvictAnnotationValue: "datadog", + }, + { + name: "the annotation is already set", + currentSafeToEvictAnnotationValue: "someVolume1,someVolume2", + volumeToAdd: "datadog", + expectedNewSafeToEvictAnnotationValue: "someVolume1,someVolume2,datadog", + }, + { + name: "the annotation is already set and the volume is already in the list", + currentSafeToEvictAnnotationValue: "someVolume1,someVolume2", + volumeToAdd: "someVolume2", + expectedNewSafeToEvictAnnotationValue: "someVolume1,someVolume2", + }, + } + + 
for _, test := range tests { + t.Run(test.name, func(_ *testing.T) { + annotations := map[string]string{} + if test.currentSafeToEvictAnnotationValue != "" { + annotations["cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes"] = test.currentSafeToEvictAnnotationValue + } + pod := FakePodWithAnnotations(annotations) + + MarkVolumeAsSafeToEvictForAutoscaler(pod, test.volumeToAdd) + + assert.Equal( + t, + test.expectedNewSafeToEvictAnnotationValue, + pod.Annotations["cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes"], + ) + }) + } + +} diff --git a/pkg/clusteragent/admission/mutate/config/config.go b/pkg/clusteragent/admission/mutate/config/config.go index 4da8680f2a2ae..0d43bdfb3c229 100644 --- a/pkg/clusteragent/admission/mutate/config/config.go +++ b/pkg/clusteragent/admission/mutate/config/config.go @@ -186,6 +186,10 @@ func (w *Webhook) inject(pod *corev1.Pod, _ string, _ dynamic.Interface) (bool, case socket: volume, volumeMount := buildVolume(DatadogVolumeName, config.Datadog().GetString("admission_controller.inject_config.socket_path"), true) injectedVol := common.InjectVolume(pod, volume, volumeMount) + if injectedVol { + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, DatadogVolumeName) + } + injectedEnv := common.InjectEnv(pod, traceURLSocketEnvVar) injectedEnv = common.InjectEnv(pod, dogstatsdURLSocketEnvVar) || injectedEnv injectedConfig = injectedEnv || injectedVol diff --git a/pkg/clusteragent/admission/mutate/config/config_test.go b/pkg/clusteragent/admission/mutate/config/config_test.go index 049d103cffab9..c8dd5437edf85 100644 --- a/pkg/clusteragent/admission/mutate/config/config_test.go +++ b/pkg/clusteragent/admission/mutate/config/config_test.go @@ -315,6 +315,7 @@ func TestInjectSocket(t *testing.T) { assert.Equal(t, pod.Spec.Volumes[0].Name, "datadog") assert.Equal(t, pod.Spec.Volumes[0].VolumeSource.HostPath.Path, "/var/run/datadog") assert.Equal(t, *pod.Spec.Volumes[0].VolumeSource.HostPath.Type, 
corev1.HostPathDirectoryOrCreate) + assert.Equal(t, "datadog", pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation]) } func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) { diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go index b361c31e3f1ad..86c6c3ecf30c2 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go @@ -714,6 +714,8 @@ func injectCWSVolume(pod *corev1.Pod) { Name: cwsVolumeName, VolumeSource: volumeSource, }) + + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, cwsVolumeName) } func injectCWSVolumeMount(container *corev1.Container) { diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go index 2d3d1fa578eef..dcaecf230eae7 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go @@ -875,6 +875,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { require.NotNil(t, annotations, "failed to annotate pod") if annotations != nil { require.Equal(t, cwsInstrumentationPodAnotationReady, annotations[cwsInstrumentationPodAnotationStatus], "CWS instrumentation annotation is missing") + require.Equal(t, cwsVolumeName, annotations[common.K8sAutoscalerSafeToEvictVolumesAnnotation], "CWS instrumentation volume should be marked as safe to evict") } } else { testNoInjectedCWSVolume(t, tt.args.pod) diff --git a/pkg/collector/corechecks/cluster/ksm/customresources/pod.go b/pkg/collector/corechecks/cluster/ksm/customresources/pod.go index ee749c094c128..4fcf26365d04e 100644 --- a/pkg/collector/corechecks/cluster/ksm/customresources/pod.go +++ 
b/pkg/collector/corechecks/cluster/ksm/customresources/pod.go @@ -238,15 +238,15 @@ func (f *extendedPodFactory) ExpectedType() interface{} { } // ListWatch returns a ListerWatcher for v1.Pod -// -//nolint:revive // TODO(CINT) Fix revive linter func (f *extendedPodFactory) ListWatch(customResourceClient interface{}, ns string, fieldSelector string) cache.ListerWatcher { client := customResourceClient.(clientset.Interface) return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + opts.FieldSelector = fieldSelector return client.CoreV1().Pods(ns).List(context.TODO(), opts) }, WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + opts.FieldSelector = fieldSelector return client.CoreV1().Pods(ns).Watch(context.TODO(), opts) }, } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go index 441469999d004..ef141ec4f61ef 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -28,6 +28,7 @@ import ( configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" kubestatemetrics "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/builder" ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store" + "github.com/DataDog/datadog-agent/pkg/util/flavor" hostnameUtil "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -67,6 +68,37 @@ var extendedCollectors = map[string]string{ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") +type podCollectionMode string + +const ( + // defaultPodCollection is the default mode where pods are collected from + // the API server. + defaultPodCollection podCollectionMode = "default" + + // nodeKubeletPodCollection is the mode where pods are collected from the + // kubelet. 
+ // + // This is meant to be enabled when the check is running on the node agent. + // This is useful in clusters with a large number of pods where emitting pod + // metrics from a single instance might be too much and cause performance + // issues. + // + // One thing to note is that when the node agent collects metrics from the + // kubelet and the cluster agent or cluster check runner collects metrics + // for other resources, label joins are not supported for pod metrics if the + // join source is not a pod. + nodeKubeletPodCollection podCollectionMode = "node_kubelet" + + // clusterUnassignedPodCollection is the mode where pods are collected from + // the API server but only unassigned pods. + // + // This is meant to be enabled when the check is running on the cluster + // agent or the cluster check runner and "nodeKubeletPodCollection" is + // enabled on the node agents, because unassigned pods cannot be collected + // from node agents. + clusterUnassignedPodCollection podCollectionMode = "cluster_unassigned" +) + // KSMConfig contains the check config parameters type KSMConfig struct { // Collectors defines the resource type collectors. @@ -149,6 +181,10 @@ type KSMConfig struct { // UseAPIServerCache enables the use of the API server cache for the check UseAPIServerCache bool `yaml:"use_apiserver_cache"` + + // PodCollectionMode defines how pods are collected. + // Accepted values are: "default", "node_kubelet", and "cluster_unassigned". 
+ PodCollectionMode podCollectionMode `yaml:"pod_collection_mode"` } // KSMCheck wraps the config and the metric stores needed to run the check @@ -160,6 +196,7 @@ type KSMCheck struct { telemetry *telemetryCache cancel context.CancelFunc isCLCRunner bool + isRunningOnNodeAgent bool clusterNameTagValue string clusterNameRFC1123 string metricNamesMapper map[string]string @@ -337,6 +374,8 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf return err } + k.configurePodCollection(builder, collectors) + // Start the collection process k.allStores = builder.BuildStores() @@ -478,9 +517,14 @@ func (k *KSMCheck) Run() error { // Note that by design, some metrics cannot have hostnames (e.g kubernetes_state.pod.unschedulable) sender.DisableDefaultHostname(true) + // If KSM is running in the node agent, and it's configured to collect only + // pods and from the node agent, we don't need to run leader election, + // because each node agent is responsible for collecting its own pods. + podsFromKubeletInNodeAgent := k.isRunningOnNodeAgent && k.instance.PodCollectionMode == nodeKubeletPodCollection + // If the check is configured as a cluster check, the cluster check worker needs to skip the leader election section. // we also do a safety check for dedicated runners to avoid trying the leader election - if !k.isCLCRunner || !k.instance.LeaderSkip { + if (!k.isCLCRunner || !k.instance.LeaderSkip) && !podsFromKubeletInNodeAgent { // Only run if Leader Election is enabled. if !ddconfig.Datadog().GetBool("leader_election") { return log.Error("Leader Election not enabled. 
The cluster-agent will not run the kube-state-metrics core check.") @@ -795,6 +839,43 @@ func (k *KSMCheck) initTags() { } } +func (k *KSMCheck) configurePodCollection(builder *kubestatemetrics.Builder, collectors []string) { + switch k.instance.PodCollectionMode { + case "": + k.instance.PodCollectionMode = defaultPodCollection + case defaultPodCollection: + // No need to do anything + case nodeKubeletPodCollection: + if k.isRunningOnNodeAgent { + // If the check is running in a node agent, we can collect pods from + // the kubelet but only if it's the only collector enabled. When + // there are more collectors enabled, we need leader election and + // pods would only be collected from one of the agents. + if len(collectors) == 1 && collectors[0] == "pods" { + builder.WithPodCollectionFromKubelet() + } else { + log.Warnf("pod collection from the Kubelet is enabled but it's only supported when the only collector enabled is pods, " + + "so the check will collect pods from the API server instead of the Kubelet") + k.instance.PodCollectionMode = defaultPodCollection + } + } else { + log.Warnf("pod collection from the Kubelet is enabled but KSM is running in the cluster agent or cluster check runner, " + + "so the check will collect pods from the API server instead of the Kubelet") + k.instance.PodCollectionMode = defaultPodCollection + } + case clusterUnassignedPodCollection: + if k.isRunningOnNodeAgent { + log.Warnf("collection of unassigned pods is enabled but KSM is running in a node agent, so the option will be ignored") + k.instance.PodCollectionMode = defaultPodCollection + } else { + builder.WithUnassignedPodsCollection() + } + default: + log.Warnf("invalid pod collection mode %q, falling back to the default mode", k.instance.PodCollectionMode) + k.instance.PodCollectionMode = defaultPodCollection + } +} + // processTelemetry accumulates the telemetry metric values, it can be called multiple times // during a check run then sendTelemetry should be called to 
forward the calculated values func (k *KSMCheck) processTelemetry(metrics map[string][]ksmstore.DDMetricsFam) { @@ -868,13 +949,14 @@ func KubeStateMetricsFactoryWithParam(labelsMapper map[string]string, labelJoins func newKSMCheck(base core.CheckBase, instance *KSMConfig) *KSMCheck { return &KSMCheck{ - CheckBase: base, - instance: instance, - telemetry: newTelemetryCache(), - isCLCRunner: ddconfig.IsCLCRunner(), - metricNamesMapper: defaultMetricNamesMapper(), - metricAggregators: defaultMetricAggregators(), - metricTransformers: defaultMetricTransformers(), + CheckBase: base, + instance: instance, + telemetry: newTelemetryCache(), + isCLCRunner: ddconfig.IsCLCRunner(), + isRunningOnNodeAgent: flavor.GetFlavor() != flavor.ClusterAgent && !ddconfig.IsCLCRunner(), + metricNamesMapper: defaultMetricNamesMapper(), + metricAggregators: defaultMetricAggregators(), + metricTransformers: defaultMetricTransformers(), // metadata metrics are useful for label joins // but shouldn't be submitted to Datadog diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go index 649feccfd5cf3..693751cb20d14 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go @@ -57,7 +57,8 @@ func TestGetDDAlertType(t *testing.T) { func Test_getInvolvedObjectTags(t *testing.T) { telemetryComponent := fxutil.Test[coretelemetry.Component](t, telemetryimpl.MockModule()) telemetryStore := telemetry.NewStore(telemetryComponent) - taggerInstance := local.NewFakeTagger(telemetryStore) + cfg := configmock.New(t) + taggerInstance := local.NewFakeTagger(cfg, telemetryStore) taggerInstance.SetTags("kubernetes_metadata:///namespaces//default", "workloadmeta-kubernetes_node", []string{"team:container-int"}, nil, nil, nil) tests := []struct { name string diff --git 
a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go index 9ad72ac74f223..8548c988a79ef 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go @@ -15,14 +15,13 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/tags" - taggerTypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" + taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" kubetypes "github.com/DataDog/datadog-agent/internal/third_party/kubernetes/pkg/kubelet/types" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/common" k8sTransformers "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s" "github.com/DataDog/datadog-agent/pkg/orchestrator" "github.com/DataDog/datadog-agent/pkg/orchestrator/redact" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" corev1 "k8s.io/api/core/v1" @@ -59,7 +58,7 @@ func (h *PodHandlers) BeforeCacheCheck(ctx processors.ProcessorContext, resource } // insert tagger tags - taggerTags, err := tagger.Tag(kubelet.PodUIDToTaggerEntityName(string(r.UID)), taggerTypes.HighCardinality) + taggerTags, err := tagger.Tag(taggertypes.NewEntityID(taggertypes.KubernetesPodUID, string(r.UID)).String(), taggertypes.HighCardinality) if err != nil { log.Debugf("Could not retrieve tags for pod: %s", err.Error()) skip = true diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go index a79f8162b0c58..0526ab2d659b1 100644 --- 
a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go @@ -18,6 +18,7 @@ import ( jsoniter "github.com/json-iterator/go" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -56,7 +57,8 @@ func ExtractECSTask(task TaskWithContainers) *model.ECSTask { Containers: extractECSContainer(task.Containers), } - tags, err := tagger.Tag(fmt.Sprintf("ecs_task://%s", task.Task.EntityID.ID), types.HighCardinality) + entityID := types.NewEntityID(types.ECSTask, task.Task.EntityID.ID) + tags, err := tagger.Tag(entityID.String(), types.HighCardinality) if err != nil { log.Debugf("Could not retrieve tags for task: %s", err.Error()) } diff --git a/pkg/collector/corechecks/containerimage/processor.go b/pkg/collector/corechecks/containerimage/processor.go index 9c440ae603c59..668a84fd93bfa 100644 --- a/pkg/collector/corechecks/containerimage/processor.go +++ b/pkg/collector/corechecks/containerimage/processor.go @@ -74,7 +74,8 @@ func (p *processor) processRefresh(allImages []*workloadmeta.ContainerImageMetad } func (p *processor) processImage(img *workloadmeta.ContainerImageMetadata) { - ddTags, err := tagger.Tag("container_image_metadata://"+img.ID, types.HighCardinality) + entityID := types.NewEntityID(types.ContainerImageMetadata, img.ID) + ddTags, err := tagger.Tag(entityID.String(), types.HighCardinality) if err != nil { log.Errorf("Could not retrieve tags for container image %s: %v", img.ID, err) } diff --git a/pkg/collector/corechecks/containers/containerd/events.go b/pkg/collector/corechecks/containers/containerd/events.go index e924aee2dcb38..8e024071a19b8 100644 --- a/pkg/collector/corechecks/containers/containerd/events.go +++ b/pkg/collector/corechecks/containers/containerd/events.go @@ -52,7 
+52,7 @@ func computeEvents(events []containerdEvent, sender sender.Sender, fil *containe alertType := event.AlertTypeInfo if split[1] == "containers" || split[1] == "tasks" { // For task events, we use the container ID in order to query the Tagger's API - t, err := tagger.Tag(containers.BuildTaggerEntityName(e.ID), types.HighCardinality) + t, err := tagger.Tag(types.NewEntityID(types.ContainerID, e.ID).String(), types.HighCardinality) if err != nil { // If there is an error retrieving tags from the Tagger, we can still submit the event as is. log.Errorf("Could not retrieve tags for the container %s: %v", e.ID, err) diff --git a/pkg/collector/corechecks/containers/containerd/utils.go b/pkg/collector/corechecks/containers/containerd/utils.go index 33adb6ac98e74..851c156bf2072 100644 --- a/pkg/collector/corechecks/containers/containerd/utils.go +++ b/pkg/collector/corechecks/containers/containerd/utils.go @@ -13,6 +13,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" ) func getProcessorFilter(legacyFilter *containers.Filter, store workloadmeta.Component) generic.ContainerFilter { @@ -26,7 +27,7 @@ func getProcessorFilter(legacyFilter *containers.Filter, store workloadmeta.Comp } func getImageTags(imageName string) []string { - long, _, short, tag, err := containers.SplitImageName(imageName) + long, _, short, tag, err := pkgcontainersimage.SplitImageName(imageName) if err != nil { return []string{fmt.Sprintf("image:%s", imageName)} } diff --git a/pkg/collector/corechecks/containers/docker/check.go b/pkg/collector/corechecks/containers/docker/check.go index aa05523b10710..1fea8ba33f979 100644 --- a/pkg/collector/corechecks/containers/docker/check.go +++ b/pkg/collector/corechecks/containers/docker/check.go @@ -17,6 
+17,7 @@ import ( "time" dockerTypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/tagger" @@ -35,7 +36,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/docker/docker/api/types/container" ) const ( @@ -239,8 +239,7 @@ func (d *DockerCheck) runDockerCustom(sender sender.Sender, du docker.Client, ra isContainerExcluded := d.containerFilter.IsExcluded(annotations, containerName, resolvedImageName, rawContainer.Labels[kubernetes.CriContainerNamespaceLabel]) isContainerRunning := rawContainer.State == string(workloadmeta.ContainerStatusRunning) - taggerEntityID := containers.BuildTaggerEntityName(rawContainer.ID) - + taggerEntityID := types.NewEntityID(types.ContainerID, rawContainer.ID).String() tags, err := getImageTagsFromContainer(taggerEntityID, resolvedImageName, isContainerExcluded || !isContainerRunning) if err != nil { log.Debugf("Unable to fetch tags for image: %s, err: %v", rawContainer.ImageID, err) diff --git a/pkg/collector/corechecks/containers/docker/eventbundle.go b/pkg/collector/corechecks/containers/docker/eventbundle.go index 21305ded89813..6f67d03d7a29a 100644 --- a/pkg/collector/corechecks/containers/docker/eventbundle.go +++ b/pkg/collector/corechecks/containers/docker/eventbundle.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/metrics/event" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -93,7 +92,8 @@ func (b *dockerEventBundle) toDatadogEvent(hostname string) (event.Event, error) output.Text = strings.Join(textLines, 
"\n") for cid := range seenContainers { - tags, err := tagger.Tag(containers.BuildTaggerEntityName(cid), types.HighCardinality) + + tags, err := tagger.Tag(types.NewEntityID(types.ContainerID, cid).String(), types.HighCardinality) if err != nil { log.Debugf("no tags for %s: %s", cid, err) } else { diff --git a/pkg/collector/corechecks/containers/docker/events.go b/pkg/collector/corechecks/containers/docker/events.go index 0180d588870d0..b492305983408 100644 --- a/pkg/collector/corechecks/containers/docker/events.go +++ b/pkg/collector/corechecks/containers/docker/events.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/telemetry" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -84,7 +83,7 @@ func (d *DockerCheck) reportExitCodes(events []*docker.ContainerEvent, sender se status = servicecheck.ServiceCheckCritical } - tags, err := tagger.Tag(containers.BuildTaggerEntityName(ev.ContainerID), types.HighCardinality) + tags, err := tagger.Tag(types.NewEntityID(types.ContainerID, ev.ContainerID).String(), types.HighCardinality) if err != nil { log.Debugf("no tags for %s: %s", ev.ContainerID, err) tags = []string{} diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events.go b/pkg/collector/corechecks/containers/docker/unbundled_events.go index 050a2bf832e45..ddb763e97f77f 100644 --- a/pkg/collector/corechecks/containers/docker/unbundled_events.go +++ b/pkg/collector/corechecks/containers/docker/unbundled_events.go @@ -13,7 +13,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/metrics/event" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/docker" 
"github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -58,7 +57,7 @@ func (t *unbundledTransformer) Transform(events []*docker.ContainerEvent) ([]eve emittedEvents.Inc(string(alertType)) tags, err := tagger.Tag( - containers.BuildTaggerEntityName(ev.ContainerID), + types.NewEntityID(types.ContainerID, ev.ContainerID).String(), types.HighCardinality, ) if err != nil { diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go index dbb929d86677c..984f59c642297 100644 --- a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go +++ b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go @@ -15,13 +15,14 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" - "github.com/DataDog/datadog-agent/pkg/metrics/event" - "github.com/DataDog/datadog-agent/pkg/util/containers" - "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/docker/docker/api/types/events" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/pkg/metrics/event" + "github.com/DataDog/datadog-agent/pkg/util/docker" ) func TestUnbundledEventsTransform(t *testing.T) { @@ -131,7 +132,7 @@ func TestUnbundledEventsTransform(t *testing.T) { defer fakeTagger.ResetTagger() for _, ev := range incomingEvents { fakeTagger.SetTags( - containers.BuildTaggerEntityName(ev.ContainerID), + types.NewEntityID(types.ContainerID, ev.ContainerID).String(), "docker", []string{fmt.Sprintf("image_name:%s", ev.ImageName), fmt.Sprintf("container_name:%s", ev.ContainerName)}, []string{}, diff --git a/pkg/collector/corechecks/containers/docker/utils.go b/pkg/collector/corechecks/containers/docker/utils.go index 8a17be87e82d0..500cf6cdde2d7 100644 --- 
a/pkg/collector/corechecks/containers/docker/utils.go +++ b/pkg/collector/corechecks/containers/docker/utils.go @@ -17,6 +17,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" ) func getProcessorFilter(legacyFilter *containers.Filter, store workloadmeta.Component) generic.ContainerFilter { @@ -43,7 +44,7 @@ func getImageTagsFromContainer(taggerEntityID string, resolvedImageName string, } func getImageTags(imageName string) ([]string, error) { - long, _, short, tag, err := containers.SplitImageName(imageName) + long, _, short, tag, err := pkgcontainersimage.SplitImageName(imageName) if err != nil { return nil, err } diff --git a/pkg/collector/corechecks/containers/generic/processor.go b/pkg/collector/corechecks/containers/generic/processor.go index 76ed6f2ee0a09..a40fe1512f4e3 100644 --- a/pkg/collector/corechecks/containers/generic/processor.go +++ b/pkg/collector/corechecks/containers/generic/processor.go @@ -13,7 +13,6 @@ import ( taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" @@ -72,8 +71,9 @@ func (p *Processor) Run(sender sender.Sender, cacheValidity time.Duration) error continue } - entityID := containers.BuildTaggerEntityName(container.ID) - tags, err := tagger.Tag(entityID, types.HighCardinality) + entityID := types.NewEntityID(types.ContainerID, container.ID) + + tags, err := tagger.Tag(entityID.String(), 
types.HighCardinality) if err != nil { log.Errorf("Could not collect tags for container %q, err: %v", container.ID[:12], err) continue diff --git a/pkg/collector/corechecks/containers/generic/processor_network.go b/pkg/collector/corechecks/containers/generic/processor_network.go index 4a092a8ba4cdb..12d9538d4b5cf 100644 --- a/pkg/collector/corechecks/containers/generic/processor_network.go +++ b/pkg/collector/corechecks/containers/generic/processor_network.go @@ -14,7 +14,6 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -97,8 +96,8 @@ func (pn *ProcessorNetwork) processGroupedContainerNetwork() { if containerNetworks.count == 1 { pn.generateNetworkMetrics(containerNetworks.tags, containerNetworks.stats) } else if containerNetworks.owner != nil && containerNetworks.owner.Kind == workloadmeta.KindKubernetesPod { - podEntityID := kubelet.PodUIDToTaggerEntityName(containerNetworks.owner.ID) - orchTags, err := tagger.Tag(podEntityID, types.HighCardinality) + podEntityID := types.NewEntityID(types.KubernetesPodUID, containerNetworks.owner.ID) + orchTags, err := tagger.Tag(podEntityID.String(), types.HighCardinality) if err != nil { log.Debugf("Unable to get orchestrator tags for pod: %s", containerNetworks.owner.ID) continue diff --git a/pkg/collector/corechecks/containers/generic/processor_network_test.go b/pkg/collector/corechecks/containers/generic/processor_network_test.go index 2aaa254d6f87f..0f9cc75bb80af 100644 --- a/pkg/collector/corechecks/containers/generic/processor_network_test.go +++ b/pkg/collector/corechecks/containers/generic/processor_network_test.go @@ -12,10 +12,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -36,13 +34,14 @@ func TestNetworkProcessorExtension(t *testing.T) { // container4 is standalone is a namespace (should report with high tags) // container5 is using host network (should not report at all) // container6 & container7 share the same network namespace with unknown owner (should not report at all) - fakeTagger.SetTags(kubelet.PodUIDToTaggerEntityName("pod1"), "foo", []string{"low:common"}, []string{"orch:common12", "pod:test"}, nil, nil) + podEntityID := types.NewEntityID(types.KubernetesPodUID, "pod1").String() + fakeTagger.SetTags(podEntityID, "foo", []string{"low:common"}, []string{"orch:common12", "pod:test"}, nil, nil) container1 := CreateContainerMeta("docker", "1") container1.Owner = &workloadmeta.EntityID{ Kind: workloadmeta.KindKubernetesPod, ID: "pod1", } - fakeTagger.SetTags(containers.BuildTaggerEntityName(container1.ID), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container1"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container1.ID).String(), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container1"}, nil) mockCollector.SetContainerEntry(container1.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(12.0), @@ -67,7 +66,7 @@ func TestNetworkProcessorExtension(t *testing.T) { Kind: workloadmeta.KindKubernetesPod, ID: "pod1", } - fakeTagger.SetTags(containers.BuildTaggerEntityName(container2.ID), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container2"}, nil) + 
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container2.ID).String(), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container2"}, nil) mockCollector.SetContainerEntry(container2.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(12.0), @@ -88,7 +87,7 @@ func TestNetworkProcessorExtension(t *testing.T) { }) container3 := CreateContainerMeta("docker", "3") - fakeTagger.SetTags(containers.BuildTaggerEntityName(container3.ID), "foo", []string{"low:common"}, []string{"orch:standalone3"}, []string{"id:container3"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container3.ID).String(), "foo", []string{"low:common"}, []string{"orch:standalone3"}, []string{"id:container3"}, nil) mockCollector.SetContainerEntry(container3.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(3.0), @@ -107,7 +106,7 @@ func TestNetworkProcessorExtension(t *testing.T) { }) container4 := CreateContainerMeta("docker", "4") - fakeTagger.SetTags(containers.BuildTaggerEntityName(container4.ID), "foo", []string{"low:common"}, []string{"orch:standalone4"}, []string{"id:container4"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container4.ID).String(), "foo", []string{"low:common"}, []string{"orch:standalone4"}, []string{"id:container4"}, nil) mockCollector.SetContainerEntry(container4.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(4.0), @@ -128,7 +127,7 @@ func TestNetworkProcessorExtension(t *testing.T) { }) container5 := CreateContainerMeta("docker", "5") - fakeTagger.SetTags(containers.BuildTaggerEntityName(container5.ID), "foo", []string{"low:common"}, []string{"orch:standalone5"}, []string{"id:container5"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container5.ID).String(), "foo", []string{"low:common"}, []string{"orch:standalone5"}, []string{"id:container5"}, nil) 
mockCollector.SetContainerEntry(container5.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(5.0), @@ -149,7 +148,7 @@ func TestNetworkProcessorExtension(t *testing.T) { }) container6 := CreateContainerMeta("docker", "6") - fakeTagger.SetTags(containers.BuildTaggerEntityName(container6.ID), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container6"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container6.ID).String(), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container6"}, nil) mockCollector.SetContainerEntry(container6.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(12.0), @@ -170,7 +169,7 @@ func TestNetworkProcessorExtension(t *testing.T) { }) container7 := CreateContainerMeta("docker", "7") - fakeTagger.SetTags(containers.BuildTaggerEntityName(container2.ID), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container7"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, container2.ID).String(), "foo", []string{"low:common"}, []string{"orch:common12"}, []string{"id:container7"}, nil) mockCollector.SetContainerEntry(container7.ID, mock.ContainerEntry{ NetworkStats: &metrics.ContainerNetworkStats{ BytesSent: pointer.Ptr(12.0), diff --git a/pkg/collector/corechecks/containers/kubelet/common/pod.go b/pkg/collector/corechecks/containers/kubelet/common/pod.go index 4755beaf5aaf6..7949d89e5f8a0 100644 --- a/pkg/collector/corechecks/containers/kubelet/common/pod.go +++ b/pkg/collector/corechecks/containers/kubelet/common/pod.go @@ -85,7 +85,7 @@ func (p *PodUtils) PopulateForPod(pod *kubelet.Pod) { // computePodTagsByPVC stores the tags for a given pod in a global caching layer, indexed by pod namespace and persistent // volume name. 
func (p *PodUtils) computePodTagsByPVC(pod *kubelet.Pod) { - podUID := kubelet.PodUIDToTaggerEntityName(pod.Metadata.UID) + podUID := types.NewEntityID(types.KubernetesPodUID, pod.Metadata.UID).String() tags, _ := tagger.Tag(podUID, types.OrchestratorCardinality) if len(tags) == 0 { return @@ -192,7 +192,7 @@ func GetContainerID(store workloadmeta.Component, metric model.Metric, filter *c return "", ErrContainerExcluded } - cID := containers.BuildTaggerEntityName(container.ID) + cID := types.NewEntityID(types.ContainerID, container.ID).String() return cID, nil } diff --git a/pkg/collector/corechecks/containers/kubelet/common/testing/utils.go b/pkg/collector/corechecks/containers/kubelet/common/testing/utils.go index 5054431177d18..dbc422edad0af 100644 --- a/pkg/collector/corechecks/containers/kubelet/common/testing/utils.go +++ b/pkg/collector/corechecks/containers/kubelet/common/testing/utils.go @@ -23,6 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/kubelet/common" "github.com/DataDog/datadog-agent/pkg/util/containers" + pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet/mock" @@ -187,7 +188,7 @@ func StorePopulatedFromFile(store workloadmetamock.Mock, filename string, podUti image, err := workloadmeta.NewContainerImage(container.ImageID, container.Image) if err != nil { - if errors.Is(err, containers.ErrImageIsSha256) { + if errors.Is(err, pkgcontainersimage.ErrImageIsSha256) { // try the resolved image ID if the image name in the container // status is a SHA256. 
this seems to happen sometimes when // pinning the image to a SHA256 diff --git a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go index 2784dbc068f4a..d32cd95ab2027 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go @@ -18,6 +18,7 @@ import ( "github.com/prometheus/common/model" "github.com/DataDog/datadog-agent/comp/core/tagger" + taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/tagger/utils" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -151,7 +152,7 @@ func (p *Provider) processContainerMetric(metricType, metricName string, metricF // for static pods, see https://github.com/kubernetes/kubernetes/pull/59948 pod := p.getPodByMetricLabel(sample.Metric) if pod != nil && p.podUtils.IsStaticPendingPod(pod.ID) { - podTags, _ := tagger.Tag(fmt.Sprintf("kubernetes_pod_uid://%s", pod.EntityID.ID), types.HighCardinality) + podTags, _ := tagger.Tag(taggercommon.BuildTaggerEntityID(pod.GetID()).String(), types.HighCardinality) if len(podTags) == 0 { continue } @@ -206,7 +207,8 @@ func (p *Provider) processPodRate(metricName string, metricFam *prom.MetricFamil if strings.Contains(metricName, ".network.") && p.podUtils.IsHostNetworkedPod(podUID) { continue } - tags, _ := tagger.Tag(fmt.Sprintf("kubernetes_pod_uid://%s", pod.EntityID.ID), types.HighCardinality) + entityID := taggercommon.BuildTaggerEntityID(pod.GetID()) + tags, _ := tagger.Tag(entityID.String(), types.HighCardinality) if len(tags) == 0 { continue } @@ -247,7 +249,8 @@ func (p *Provider) processUsageMetric(metricName string, metricFam *prom.MetricF // for static pods, see https://github.com/kubernetes/kubernetes/pull/59948 pod := 
p.getPodByMetricLabel(sample.Metric) if pod != nil && p.podUtils.IsStaticPendingPod(pod.ID) { - podTags, _ := tagger.Tag(fmt.Sprintf("kubernetes_pod_uid://%s", pod.EntityID.ID), types.HighCardinality) + entityID := taggercommon.BuildTaggerEntityID(pod.EntityID) + podTags, _ := tagger.Tag(entityID.String(), types.HighCardinality) if len(podTags) == 0 { continue } diff --git a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go index c5f0725dab3e5..ea3d40af9c990 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go @@ -165,7 +165,7 @@ func (p *Provider) appendPodTagsToVolumeMetrics(metricFam *prom.MetricFamily, se pvcName := metric.Metric["persistentvolumeclaim"] namespace := metric.Metric["namespace"] if pvcName == "" || namespace == "" || p.filter.IsExcluded(nil, "", "", string(namespace)) { - return + continue } tags := p.MetricTags(metric) if podTags := p.podUtils.GetPodTagsByPVC(string(namespace), string(pvcName)); len(podTags) > 0 { diff --git a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go index dc00b58d4eaf7..3c3046f05a48a 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go @@ -285,6 +285,16 @@ func (suite *ProviderTestSuite) TestPVCMetricsExcludedByNamespace() { suite.T().Fatalf("unexpected error returned by call to provider.Provide: %v", err) } + // namespace not filtered still shows up + podWithPVCNotFilteredTags := append(commontesting.InstanceTags, "persistentvolumeclaim:ddagent-pvc-ddagent-test-2", "namespace:unit-test") + + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", 
common.KubeletMetricsPrefix+"kubelet.volume.stats.capacity_bytes", podWithPVCNotFilteredTags) + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", common.KubeletMetricsPrefix+"kubelet.volume.stats.used_bytes", podWithPVCNotFilteredTags) + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", common.KubeletMetricsPrefix+"kubelet.volume.stats.available_bytes", podWithPVCNotFilteredTags) + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", common.KubeletMetricsPrefix+"kubelet.volume.stats.inodes", podWithPVCNotFilteredTags) + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", common.KubeletMetricsPrefix+"kubelet.volume.stats.inodes_used", podWithPVCNotFilteredTags) + suite.mockSender.AssertMetricTaggedWith(suite.T(), "Gauge", common.KubeletMetricsPrefix+"kubelet.volume.stats.inodes_free", podWithPVCNotFilteredTags) + // pvc tags show up podWithPVCTags := append(commontesting.InstanceTags, "persistentvolumeclaim:www-web-2", "namespace:default", "kube_namespace:default", "kube_service:nginx", "kube_stateful_set:web", "namespace:default") diff --git a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go index 768bf01921b56..aa2a19af797c5 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go @@ -11,7 +11,6 @@ package pod import ( "context" - "fmt" "slices" "sort" "strings" @@ -211,7 +210,8 @@ func (r *runningAggregator) recordPod(p *Provider, pod *kubelet.Pod) { log.Debug("skipping pod with no uid") return } - tagList, _ := tagger.Tag(fmt.Sprintf("kubernetes_pod_uid://%s", podID), types.LowCardinality) + entityID := types.NewEntityID(types.KubernetesPodUID, podID).String() + tagList, _ := tagger.Tag(entityID, types.LowCardinality) if len(tagList) == 0 { return } diff --git a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go 
b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go index cb1b8a3276fd1..6e2d330d66d7d 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go @@ -149,7 +149,8 @@ func (p *Provider) processPodStats(sender sender.Sender, return } - podTags, _ := tagger.Tag(kubelet.PodUIDToTaggerEntityName(podStats.PodRef.UID), + entityID := types.NewEntityID(types.KubernetesPodUID, podStats.PodRef.UID) + podTags, _ := tagger.Tag(entityID.String(), types.OrchestratorCardinality) if len(podTags) == 0 { @@ -220,7 +221,7 @@ func (p *Provider) processContainerStats(sender sender.Sender, podStats.PodRef.Namespace) { continue } - tags, err := tagger.Tag(containers.BuildTaggerEntityName(ctr.ID), types.HighCardinality) + tags, err := tagger.Tag(types.NewEntityID(types.ContainerID, ctr.ID).String(), types.HighCardinality) if err != nil || len(tags) == 0 { log.Debugf("Tags not found for container: %s/%s/%s:%s - no metrics will be sent", podStats.PodRef.Namespace, podStats.PodRef.Name, containerName, ctr.ID) diff --git a/pkg/collector/corechecks/containers/kubelet/testdata/kubelet_metrics.txt b/pkg/collector/corechecks/containers/kubelet/testdata/kubelet_metrics.txt index 0a97c3c4e8a25..19a992b16646d 100644 --- a/pkg/collector/corechecks/containers/kubelet/testdata/kubelet_metrics.txt +++ b/pkg/collector/corechecks/containers/kubelet/testdata/kubelet_metrics.txt @@ -445,11 +445,11 @@ kubelet_volume_stats_inodes_used{namespace="default",persistentvolumeclaim="www2 kubelet_volume_stats_inodes_used{namespace="default",persistentvolumeclaim="web-2-ephemeralvolume"} 17955 # HELP kubelet_volume_stats_used_bytes Number of used bytes in the volume # TYPE kubelet_volume_stats_used_bytes gauge -kubelet_volume_stats_used_bytes{namespace="unit-test",persistentvolumeclaim="ddagent-pvc-ddagent-test-2"} 2.319220736e+09 
-kubelet_volume_stats_used_bytes{namespace="unit-test",persistentvolumeclaim="ddagent-pvc-ddagent-test-3"} 1.53286656e+09 kubelet_volume_stats_used_bytes{namespace="default",persistentvolumeclaim="www-web-2"} 2.319220736e+09 kubelet_volume_stats_used_bytes{namespace="default",persistentvolumeclaim="www2-web-3"} 1.53286656e+09 kubelet_volume_stats_used_bytes{namespace="default",persistentvolumeclaim="web-2-ephemeralvolume"} 1.53286656e+09 +kubelet_volume_stats_used_bytes{namespace="unit-test",persistentvolumeclaim="ddagent-pvc-ddagent-test-2"} 2.319220736e+09 +kubelet_volume_stats_used_bytes{namespace="unit-test",persistentvolumeclaim="ddagent-pvc-ddagent-test-3"} 1.53286656e+09 # HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. # TYPE kubernetes_build_info gauge kubernetes_build_info{buildDate="2018-03-26T16:44:10Z",compiler="gc",gitCommit="fc32d2f3698e36b93322a3465f63a14e9f0eaead",gitTreeState="clean",gitVersion="v1.10.0",goVersion="go1.9.3",major="1",minor="10",platform="linux/amd64"} 1 diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go index 7962f60cd1fe2..75797cb5352a0 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go @@ -21,6 +21,7 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" @@ -29,7 +30,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/event" 
process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -115,7 +115,7 @@ func (m *OOMKillCheck) Run() error { log.Debugf("Unable to extract containerID from cgroup name: %s, err: %v", line.CgroupName, err) } - entityID := containers.BuildTaggerEntityName(containerID) + entityID := types.NewEntityID(types.ContainerID, containerID).String() var tags []string if entityID != "" { tags, err = tagger.Tag(entityID, tagger.ChecksCardinality()) diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go index 6f1d38f9a1833..30fa918eb37f3 100644 --- a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go +++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go @@ -26,7 +26,6 @@ import ( dd_config "github.com/DataDog/datadog-agent/pkg/config" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -111,7 +110,7 @@ func (t *TCPQueueLengthCheck) Run() error { continue } - entityID := containers.BuildTaggerEntityName(containerID) + entityID := types.NewEntityID(types.ContainerID, containerID).String() var tags []string if entityID != "" { tags, err = tagger.Tag(entityID, types.HighCardinality) diff --git a/pkg/collector/corechecks/sbom/processor.go b/pkg/collector/corechecks/sbom/processor.go index b19aedf9e24d9..637bf4ecdfbe8 100644 --- a/pkg/collector/corechecks/sbom/processor.go +++ b/pkg/collector/corechecks/sbom/processor.go @@ -272,7 +272,8 @@ func (p *processor) processImageSBOM(img *workloadmeta.ContainerImageMetadata) { return 
} - ddTags, err := tagger.Tag("container_image_metadata://"+img.ID, types.HighCardinality) + entityID := types.NewEntityID(types.ContainerImageMetadata, img.ID).String() + ddTags, err := tagger.Tag(entityID, types.HighCardinality) if err != nil { log.Errorf("Could not retrieve tags for container image %s: %v", img.ID, err) } diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go index d64e60bada8c3..8a5419676d0f3 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go @@ -165,15 +165,12 @@ func pythonDetector(pid int, _ []string, _ map[string]string, _ usm.DetectorCont // entry for APM NodeJS instrumentation. Returns true if finding such // an entry, false otherwise. func isNodeInstrumented(f fs.File) bool { - // Don't try to read a non-regular file. - if fi, err := f.Stat(); err != nil || !fi.Mode().IsRegular() { + reader, err := usm.SizeVerifiedReader(f) + if err != nil { return false } - const readLimit = 1 * 1024 * 1024 // Read 1MiB max - - limitReader := io.LimitReader(f, readLimit) - bufferedReader := bufio.NewReader(limitReader) + bufferedReader := bufio.NewReader(reader) return nodeAPMCheckRegex.MatchReader(bufferedReader) } diff --git a/pkg/collector/corechecks/servicediscovery/errors.go b/pkg/collector/corechecks/servicediscovery/errors.go index 46b4af5fbb194..b38ecbcca2122 100644 --- a/pkg/collector/corechecks/servicediscovery/errors.go +++ b/pkg/collector/corechecks/servicediscovery/errors.go @@ -12,7 +12,6 @@ import ( type errCode string const ( - errorCodeProcfs errCode = "procfs" errorCodePortPoller errCode = "port_poller" errorCodeRepeatedServiceName errCode = "repeated_service_name" errorCodeSystemProbeConn errCode = "system_probe_conn" diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index 996268a0067a1..fe22df5e15326 100644 --- 
a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -39,6 +39,7 @@ type eventPayload struct { Ports []uint16 `json:"ports"` PID int `json:"pid"` CommandLine []string `json:"command_line"` + RSSMemory uint64 `json:"rss_memory"` } type event struct { @@ -66,13 +67,14 @@ func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { Env: env, ServiceLanguage: svc.meta.Language, ServiceType: svc.meta.Type, - StartTime: int64(svc.process.Stat.StartTime), + StartTime: int64(svc.service.StartTimeSecs), LastSeen: svc.LastHeartbeat.Unix(), APMInstrumentation: svc.meta.APMInstrumentation, ServiceNameSource: svc.meta.NameSource, - Ports: svc.process.Ports, - PID: svc.process.PID, - CommandLine: svc.process.CmdLine, + Ports: svc.service.Ports, + PID: svc.service.PID, + CommandLine: svc.service.CommandLine, + RSSMemory: svc.service.RSS, }, } } @@ -86,9 +88,9 @@ func newTelemetrySender(sender sender.Sender) *telemetrySender { func (ts *telemetrySender) sendStartServiceEvent(svc serviceInfo) { log.Debugf("[pid: %d | name: %s | ports: %v] start-service", - svc.process.PID, + svc.service.PID, svc.meta.Name, - svc.process.Ports, + svc.service.Ports, ) e := ts.newEvent(eventTypeStartService, svc) @@ -103,7 +105,7 @@ func (ts *telemetrySender) sendStartServiceEvent(svc serviceInfo) { func (ts *telemetrySender) sendHeartbeatServiceEvent(svc serviceInfo) { log.Debugf("[pid: %d | name: %s] heartbeat-service", - svc.process.PID, + svc.service.PID, svc.meta.Name, ) @@ -119,7 +121,7 @@ func (ts *telemetrySender) sendHeartbeatServiceEvent(svc serviceInfo) { func (ts *telemetrySender) sendEndServiceEvent(svc serviceInfo) { log.Debugf("[pid: %d | name: %s] end-service", - svc.process.PID, + svc.service.PID, svc.meta.Name, ) diff --git a/pkg/collector/corechecks/servicediscovery/events_test.go b/pkg/collector/corechecks/servicediscovery/events_test.go index ec04b6318be2a..747d49e5dcbf2 100644 --- 
a/pkg/collector/corechecks/servicediscovery/events_test.go +++ b/pkg/collector/corechecks/servicediscovery/events_test.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" ) func mockSenderEvents(t *testing.T, m *mocksender.MockSender) []*event { @@ -54,14 +55,12 @@ func Test_telemetrySender(t *testing.T) { ts.hostname = mHostname svc := serviceInfo{ - process: processInfo{ - PID: 99, - CmdLine: []string{"test-service", "--args"}, - Env: nil, - Stat: procStat{ - StartTime: uint64(now.Add(-20 * time.Minute).Unix()), - }, - Ports: []uint16{80, 8080}, + service: model.Service{ + PID: 99, + CommandLine: []string{"test-service", "--args"}, + Ports: []uint16{80, 8080}, + StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()), + RSS: 500 * 1024 * 1024, }, meta: ServiceMetadata{ Name: "test-service", @@ -95,6 +94,7 @@ func Test_telemetrySender(t *testing.T) { Ports: []uint16{80, 8080}, PID: 99, CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, { @@ -114,6 +114,7 @@ func Test_telemetrySender(t *testing.T) { Ports: []uint16{80, 8080}, PID: 99, CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, { @@ -133,6 +134,7 @@ func Test_telemetrySender(t *testing.T) { Ports: []uint16{80, 8080}, PID: 99, CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, } @@ -162,14 +164,10 @@ func Test_telemetrySender_name_provided(t *testing.T) { ts.hostname = mHostname svc := serviceInfo{ - process: processInfo{ - PID: 55, - CmdLine: []string{"foo", "--option"}, - Env: nil, - Stat: procStat{ - StartTime: uint64(now.Add(-20 * time.Minute).Unix()), - }, - Ports: nil, + service: model.Service{ + PID: 55, + CommandLine: []string{"foo", "--option"}, + StartTimeSecs: uint64(now.Add(-20 * 
time.Minute).Unix()), }, meta: ServiceMetadata{ Name: "test-service", diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index 3b5ac8452d451..db48fbfe6de20 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -8,16 +8,12 @@ package servicediscovery import ( - "fmt" "time" - "github.com/prometheus/procfs" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/servicetype" ddconfig "github.com/DataDog/datadog-agent/pkg/config" processnet "github.com/DataDog/datadog-agent/pkg/process/net" - "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -27,71 +23,29 @@ func init() { newOSImpl = newLinuxImpl } -const ( - maxCommandLine = 200 -) - -var ignoreCfgLinux = []string{ - "sshd", - "dhclient", - "systemd", - "systemd-resolved", - "systemd-networkd", - "datadog-agent", - "livenessprobe", - "docker-proxy", // remove when we have docker support in place -} - type linuxImpl struct { - procfs procFS getSysProbeClient func() (systemProbeClient, error) time timer - bootTime uint64 ignoreCfg map[string]bool ignoreProcs map[int]bool aliveServices map[int]*serviceInfo potentialServices map[int]*serviceInfo - - scrubber *procutil.DataScrubber } func newLinuxImpl(ignoreCfg map[string]bool) (osImpl, error) { - for _, i := range ignoreCfgLinux { - ignoreCfg[i] = true - } - pfs, err := procfs.NewDefaultFS() - if err != nil { - return nil, err - } - stat, err := pfs.Stat() - if err != nil { - return nil, err - } return &linuxImpl{ - procfs: wProcFS{pfs}, - bootTime: stat.BootTime, getSysProbeClient: getSysProbeClient, time: realTime{}, ignoreCfg: ignoreCfg, ignoreProcs: make(map[int]bool), aliveServices: make(map[int]*serviceInfo), potentialServices: 
make(map[int]*serviceInfo), - scrubber: procutil.NewDefaultDataScrubber(), }, nil } func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { - procs, err := li.aliveProcs() - if err != nil { - return nil, errWithCode{ - err: err, - code: errorCodeProcfs, - svc: nil, - } - } - sysProbe, err := li.getSysProbeClient() if err != nil { return nil, errWithCode{ @@ -108,6 +62,12 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { } } + // The endpoint could be refactored in the future to return a map to avoid this. + serviceMap := make(map[int]*model.Service, len(response.Services)) + for _, service := range response.Services { + serviceMap[service.PID] = &service + } + events := serviceEvents{} now := li.time.Now() @@ -115,8 +75,9 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { // potentialServices contains processes that we scanned in the previous iteration and had open ports. // we check if they are still alive in this iteration, and if so, we send a start-service telemetry event. 
for pid, svc := range li.potentialServices { - if _, ok := procs[pid]; ok { + if service, ok := serviceMap[pid]; ok { svc.LastHeartbeat = now + svc.service.RSS = service.RSS li.aliveServices[pid] = svc events.start = append(events.start, *svc) } @@ -132,53 +93,37 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { if _, ok := li.aliveServices[pid]; !ok { log.Debugf("[pid: %d] found new process with open ports", pid) - p, ok := procs[pid] - if !ok { - log.Debugf("[pid: %d] process with open ports was not found in alive procs", pid) - continue - } - - svc, err := li.getServiceInfo(p, service) - if err != nil { - telemetryFromError(errWithCode{ - err: err, - code: errorCodeProcfs, - svc: nil, - }) - log.Errorf("[pid: %d] failed to get process info: %v", pid, err) - li.ignoreProcs[pid] = true - continue - } + svc := li.getServiceInfo(service) if li.ignoreCfg[svc.meta.Name] { log.Debugf("[pid: %d] process ignored from config: %s", pid, svc.meta.Name) li.ignoreProcs[pid] = true continue } log.Debugf("[pid: %d] adding process to potential: %s", pid, svc.meta.Name) - li.potentialServices[pid] = svc + li.potentialServices[pid] = &svc } } // check if services previously marked as alive still are. for pid, svc := range li.aliveServices { - if _, ok := procs[pid]; !ok { + if service, ok := serviceMap[pid]; !ok { delete(li.aliveServices, pid) events.stop = append(events.stop, *svc) } else if now.Sub(svc.LastHeartbeat).Truncate(time.Minute) >= heartbeatTime { svc.LastHeartbeat = now + svc.service.RSS = service.RSS events.heartbeat = append(events.heartbeat, *svc) } } // check if services previously marked as ignore are still alive. 
for pid := range li.ignoreProcs { - if _, ok := procs[pid]; !ok { + if _, ok := serviceMap[pid]; !ok { delete(li.ignoreProcs, pid) } } return &discoveredServices{ - aliveProcsCount: len(procs), ignoreProcs: li.ignoreProcs, potentials: li.potentialServices, runningServices: li.aliveServices, @@ -186,94 +131,12 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { }, nil } -func (li *linuxImpl) aliveProcs() (map[int]proc, error) { - procs, err := li.procfs.AllProcs() - if err != nil { - return nil, err - } - procMap := map[int]proc{} - for _, v := range procs { - procMap[v.PID()] = v - } - return procMap, nil -} - -// countAndAddElements is a helper for truncateCmdline used to be able to -// pre-calculate the size of the output slice to improve performance. -func countAndAddElements(cmdline []string, inElements int) (int, []string) { - var out []string - - if inElements != 0 { - out = make([]string, 0, inElements) - } - - elements := 0 - total := 0 - for _, arg := range cmdline { - if total >= maxCommandLine { - break - } - - this := len(arg) - if this == 0 { - // To avoid ending up with a large array with empty strings - continue - } - - if total+this > maxCommandLine { - this = maxCommandLine - total - } - - if inElements != 0 { - out = append(out, arg[:this]) - } - - elements++ - total += this - } - - return elements, out -} - -// truncateCmdline truncates the command line length to maxCommandLine. 
-func truncateCmdline(cmdline []string) []string { - elements, _ := countAndAddElements(cmdline, 0) - _, out := countAndAddElements(cmdline, elements) - return out -} - -func (li *linuxImpl) getServiceInfo(p proc, service model.Service) (*serviceInfo, error) { - cmdline, err := p.CmdLine() - if err != nil { - return nil, err - } - - stat, err := p.Stat() - if err != nil { - return nil, fmt.Errorf("failed to read /proc/{pid}/stat: %w", err) - } - +func (li *linuxImpl) getServiceInfo(service model.Service) serviceInfo { // if the process name is docker-proxy, we should talk to docker to get the process command line and env vars // have to see how far this can go but not for the initial release // for now, docker-proxy is going on the ignore list - // calculate the start time - // divide Starttime by 100 to go from clicks since boot to seconds since boot - startTimeSecs := li.bootTime + (stat.Starttime / 100) - - cmdline, _ = li.scrubber.ScrubCommand(cmdline) - cmdline = truncateCmdline(cmdline) - - pInfo := processInfo{ - PID: p.PID(), - Stat: procStat{ - StartTime: startTimeSecs, - }, - Ports: service.Ports, - CmdLine: cmdline, - } - serviceType := servicetype.Detect(service.Name, service.Ports) meta := ServiceMetadata{ @@ -284,45 +147,11 @@ func (li *linuxImpl) getServiceInfo(p proc, service model.Service) (*serviceInfo NameSource: service.NameSource, } - return &serviceInfo{ - process: pInfo, + return serviceInfo{ meta: meta, + service: service, LastHeartbeat: li.time.Now(), - }, nil -} - -type proc interface { - PID() int - CmdLine() ([]string, error) - Stat() (procfs.ProcStat, error) -} - -type wProc struct { - procfs.Proc -} - -func (w wProc) PID() int { - return w.Proc.PID -} - -type procFS interface { - AllProcs() ([]proc, error) -} - -type wProcFS struct { - procfs.FS -} - -func (w wProcFS) AllProcs() ([]proc, error) { - procs, err := w.FS.AllProcs() - if err != nil { - return nil, err - } - var res []proc - for _, p := range procs { - res = append(res, 
wProc{p}) } - return res, nil } type systemProbeClient interface { diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_mock.go b/pkg/collector/corechecks/servicediscovery/impl_linux_mock.go index b4e5ca0aac9e0..2022d4bcce6c9 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_mock.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_mock.go @@ -16,114 +16,8 @@ import ( model "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" gomock "github.com/golang/mock/gomock" - procfs "github.com/prometheus/procfs" ) -// Mockproc is a mock of proc interface. -type Mockproc struct { - ctrl *gomock.Controller - recorder *MockprocMockRecorder -} - -// MockprocMockRecorder is the mock recorder for Mockproc. -type MockprocMockRecorder struct { - mock *Mockproc -} - -// NewMockproc creates a new mock instance. -func NewMockproc(ctrl *gomock.Controller) *Mockproc { - mock := &Mockproc{ctrl: ctrl} - mock.recorder = &MockprocMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *Mockproc) EXPECT() *MockprocMockRecorder { - return m.recorder -} - -// CmdLine mocks base method. -func (m *Mockproc) CmdLine() ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CmdLine") - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CmdLine indicates an expected call of CmdLine. -func (mr *MockprocMockRecorder) CmdLine() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CmdLine", reflect.TypeOf((*Mockproc)(nil).CmdLine)) -} - -// PID mocks base method. -func (m *Mockproc) PID() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PID") - ret0, _ := ret[0].(int) - return ret0 -} - -// PID indicates an expected call of PID. 
-func (mr *MockprocMockRecorder) PID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PID", reflect.TypeOf((*Mockproc)(nil).PID)) -} - -// Stat mocks base method. -func (m *Mockproc) Stat() (procfs.ProcStat, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stat") - ret0, _ := ret[0].(procfs.ProcStat) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stat indicates an expected call of Stat. -func (mr *MockprocMockRecorder) Stat() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*Mockproc)(nil).Stat)) -} - -// MockprocFS is a mock of procFS interface. -type MockprocFS struct { - ctrl *gomock.Controller - recorder *MockprocFSMockRecorder -} - -// MockprocFSMockRecorder is the mock recorder for MockprocFS. -type MockprocFSMockRecorder struct { - mock *MockprocFS -} - -// NewMockprocFS creates a new mock instance. -func NewMockprocFS(ctrl *gomock.Controller) *MockprocFS { - mock := &MockprocFS{ctrl: ctrl} - mock.recorder = &MockprocFSMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockprocFS) EXPECT() *MockprocFSMockRecorder { - return m.recorder -} - -// AllProcs mocks base method. -func (m *MockprocFS) AllProcs() ([]proc, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AllProcs") - ret0, _ := ret[0].([]proc) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AllProcs indicates an expected call of AllProcs. -func (mr *MockprocFSMockRecorder) AllProcs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllProcs", reflect.TypeOf((*MockprocFS)(nil).AllProcs)) -} - // MocksystemProbeClient is a mock of systemProbeClient interface. 
type MocksystemProbeClient struct { ctrl *gomock.Controller diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go index d51004b64277b..563dabbbd8dd4 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go @@ -9,16 +9,12 @@ package servicediscovery import ( "cmp" - "errors" - "strings" "testing" "time" "github.com/golang/mock/gomock" gocmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/prometheus/procfs" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" @@ -29,102 +25,67 @@ import ( ) type testProc struct { - pid int - cmdline []string - env []string - cwd string - stat procfs.ProcStat + pid int + env []string + cwd string } var ( bootTimeSeconds = uint64(time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC).Unix()) // procLaunched is number of clicks (100 per second) since bootTime when the process started // assume it's 12 hours later - procLaunchedClicks = uint64((12 * time.Hour).Seconds()) * 100 - pythonCommandLine = []string{"python", "-m", "foobar.main", "--", "--password", "secret", - "--other-stuff", "--more-things", "--even-more", - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - "AAAAAAAAAAAAAAAAAAAAAAAAA", - "--a-long-argument-total-over-max-length", - } - eventPythonCommandLine = []string{"python", "-m", "foobar.main", "--", "--password", "********", - "--other-stuff", "--more-things", "--even-more", - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - "AAAAAAAAAAAAAAAAAAAAAAAAA", - "--a-long-argument"} + procLaunchedSeconds = bootTimeSeconds + uint64((12 * time.Hour).Seconds()) + pythonCommandLine = []string{"python", "-m", "foobar.main"} ) var ( - procSSHD = testProc{ - pid: 
6, - cmdline: []string{"sshd"}, - env: nil, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, - } procTestService1 = testProc{ - pid: 99, - cmdline: []string{"test-service-1"}, - env: []string{}, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, + pid: 99, + env: []string{}, + cwd: "", } procPythonService = testProc{ - pid: 500, - cmdline: pythonCommandLine, - env: []string{}, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, + pid: 500, + env: []string{}, + cwd: "", } procIgnoreService1 = testProc{ - pid: 100, - cmdline: []string{"ignore-1"}, - env: nil, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, + pid: 100, + env: nil, + cwd: "", } procTestService1Repeat = testProc{ - pid: 101, - cmdline: []string{"test-service-1"}, // same name as procTestService1 - env: []string{}, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, + pid: 101, + env: []string{}, + cwd: "", } procTestService1DifferentPID = testProc{ - pid: 102, - cmdline: []string{"test-service-1"}, - env: []string{}, - cwd: "", - stat: procfs.ProcStat{ - Starttime: procLaunchedClicks, - }, + pid: 102, + env: []string{}, + cwd: "", } ) var ( - portTCP22 = model.Service{ - PID: procSSHD.pid, - Name: "sshd", - Ports: []uint16{22}, - } portTCP8080 = model.Service{ PID: procTestService1.pid, Name: "test-service-1", Ports: []uint16{8080}, APMInstrumentation: string(apm.None), NameSource: "provided", + RSS: 100 * 1024 * 1024, + CommandLine: []string{"test-service-1"}, + StartTimeSecs: procLaunchedSeconds, + } + portTCP8080UpdatedRSS = model.Service{ + PID: procTestService1.pid, + Name: "test-service-1", + Ports: []uint16{8080}, + APMInstrumentation: string(apm.None), + NameSource: "provided", + RSS: 200 * 1024 * 1024, + CommandLine: []string{"test-service-1"}, + StartTimeSecs: procLaunchedSeconds, } portTCP8080DifferentPID = model.Service{ PID: procTestService1DifferentPID.pid, @@ -132,38 
+93,34 @@ var ( Ports: []uint16{8080}, APMInstrumentation: string(apm.Injected), NameSource: "generated", + CommandLine: []string{"test-service-1"}, + StartTimeSecs: procLaunchedSeconds, } portTCP8081 = model.Service{ - PID: procIgnoreService1.pid, - Name: "ignore-1", - Ports: []uint16{8081}, + PID: procIgnoreService1.pid, + Name: "ignore-1", + Ports: []uint16{8081}, + StartTimeSecs: procLaunchedSeconds, } portTCP5000 = model.Service{ - PID: procPythonService.pid, - Name: "python-service", - Language: "python", - Ports: []uint16{5000}, + PID: procPythonService.pid, + Name: "python-service", + Language: "python", + Ports: []uint16{5000}, + CommandLine: pythonCommandLine, + StartTimeSecs: procLaunchedSeconds, } portTCP5432 = model.Service{ - PID: procTestService1Repeat.pid, - Name: "test-service-1", - Ports: []uint16{5432}, + PID: procTestService1Repeat.pid, + Name: "test-service-1", + Ports: []uint16{5432}, + CommandLine: []string{"test-service-1"}, + StartTimeSecs: procLaunchedSeconds, } ) -func mockProc( - ctrl *gomock.Controller, - p testProc, -) proc { - m := NewMockproc(ctrl) - m.EXPECT().PID().Return(p.pid).AnyTimes() - m.EXPECT().CmdLine().Return(p.cmdline, nil).AnyTimes() - m.EXPECT().Stat().Return(p.stat, nil).AnyTimes() - return m -} - func calcTime(additionalTime time.Duration) time.Time { - unix := time.Unix(int64(bootTimeSeconds+(procLaunchedClicks/100)), 0) + unix := time.Unix(int64(procLaunchedSeconds), 0) return unix.Add(additionalTime) } @@ -201,7 +158,6 @@ func Test_linuxImpl(t *testing.T) { t.Setenv("DD_DISCOVERY_ENABLED", "true") type checkRun struct { - aliveProcs []testProc servicesResp *model.ServicesResponse time time.Time } @@ -215,14 +171,7 @@ func Test_linuxImpl(t *testing.T) { name: "basic", checkRun: []*checkRun{ { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procPythonService, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP5000, portTCP8080, portTCP8081, @@ 
-230,14 +179,7 @@ func Test_linuxImpl(t *testing.T) { time: calcTime(0), }, { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procPythonService, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP5000, portTCP8080, portTCP8081, @@ -245,27 +187,15 @@ func Test_linuxImpl(t *testing.T) { time: calcTime(1 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procPythonService, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP5000, - portTCP8080, + portTCP8080UpdatedRSS, portTCP8081, }}, time: calcTime(20 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procPythonService, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP5000, }}, time: calcTime(21 * time.Minute), @@ -288,6 +218,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 100 * 1024 * 1024, }, }, { @@ -306,6 +237,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 200 * 1024 * 1024, }, }, { @@ -324,6 +256,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 200 * 1024 * 1024, }, }, { @@ -340,7 +273,7 @@ func Test_linuxImpl(t *testing.T) { Ports: []uint16{5000}, PID: 500, ServiceLanguage: "python", - CommandLine: eventPythonCommandLine, + CommandLine: pythonCommandLine, }, }, { @@ -357,7 +290,7 @@ func Test_linuxImpl(t *testing.T) { Ports: []uint16{5000}, PID: 500, ServiceLanguage: "python", - CommandLine: eventPythonCommandLine, + CommandLine: pythonCommandLine, }, }, }, @@ -366,14 +299,7 @@ func Test_linuxImpl(t *testing.T) { name: "repeated_service_name", checkRun: []*checkRun{ { - aliveProcs: 
[]testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procTestService1Repeat, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080, portTCP8081, portTCP5432, @@ -381,14 +307,7 @@ func Test_linuxImpl(t *testing.T) { time: calcTime(0), }, { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procTestService1Repeat, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080, portTCP8081, portTCP5432, @@ -396,14 +315,7 @@ func Test_linuxImpl(t *testing.T) { time: calcTime(1 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - procTestService1Repeat, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080, portTCP8081, portTCP5432, @@ -411,12 +323,7 @@ func Test_linuxImpl(t *testing.T) { time: calcTime(20 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procTestService1, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080, }}, time: calcTime(21 * time.Minute), @@ -455,6 +362,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 100 * 1024 * 1024, }, }, { @@ -505,6 +413,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 100 * 1024 * 1024, }, }, }, @@ -515,49 +424,27 @@ func Test_linuxImpl(t *testing.T) { name: "restart_service", checkRun: []*checkRun{ { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080, portTCP8081, }}, time: calcTime(0), }, { - aliveProcs: []testProc{ - procSSHD, - procIgnoreService1, - procTestService1, - }, servicesResp: &model.ServicesResponse{Services: 
[]model.Service{ - portTCP22, portTCP8080, portTCP8081, }}, time: calcTime(1 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procTestService1DifferentPID, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080DifferentPID, }}, time: calcTime(21 * time.Minute), }, { - aliveProcs: []testProc{ - procSSHD, - procTestService1DifferentPID, - }, servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP22, portTCP8080DifferentPID, }}, time: calcTime(22 * time.Minute), @@ -580,6 +467,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", ServiceNameSource: "provided", + RSSMemory: 100 * 1024 * 1024, }, }, { @@ -631,26 +519,16 @@ func Test_linuxImpl(t *testing.T) { Return(cr.servicesResp, nil). Times(1) - var procs []proc - for _, p := range cr.aliveProcs { - procs = append(procs, mockProc(ctrl, p)) - } - _, mHostname := hostnameinterface.NewMock(hostnameinterface.MockHostname(host)) - mProcFS := NewMockprocFS(ctrl) - mProcFS.EXPECT().AllProcs().Return(procs, nil).Times(1) - mTimer := NewMocktimer(ctrl) mTimer.EXPECT().Now().Return(cr.time).AnyTimes() // set mocks - check.os.(*linuxImpl).procfs = mProcFS check.os.(*linuxImpl).getSysProbeClient = func() (systemProbeClient, error) { return mSysProbe, nil } check.os.(*linuxImpl).time = mTimer - check.os.(*linuxImpl).bootTime = bootTimeSeconds check.sender.hostname = mHostname err = check.Run() @@ -668,74 +546,3 @@ func Test_linuxImpl(t *testing.T) { }) } } - -type errorProcFS struct{} - -func (errorProcFS) AllProcs() ([]proc, error) { - return nil, errors.New("procFS failure") -} - -func Test_linuxImpl_errors(t *testing.T) { - t.Setenv("DD_DISCOVERY_ENABLED", "true") - - // bad procFS - { - li := linuxImpl{ - procfs: errorProcFS{}, - } - ds, err := li.DiscoverServices() - if ds != nil { - t.Error("expected nil discovery service") - } - var expected errWithCode - if errors.As(err, &expected) { - if 
expected.Code() != errorCodeProcfs { - t.Errorf("expected error code procfs: %#v", expected) - } - } else { - t.Error("expected errWithCode, got", err) - } - } -} - -func TestTruncateCmdline(t *testing.T) { - type testData struct { - original []string - result []string - } - - tests := []testData{ - { - original: []string{}, - result: nil, - }, - { - original: []string{"a", "b", "", "c", "d"}, - result: []string{"a", "b", "c", "d"}, - }, - { - original: []string{"x", strings.Repeat("A", maxCommandLine-1)}, - result: []string{"x", strings.Repeat("A", maxCommandLine-1)}, - }, - { - original: []string{strings.Repeat("A", maxCommandLine), "B"}, - result: []string{strings.Repeat("A", maxCommandLine)}, - }, - { - original: []string{strings.Repeat("A", maxCommandLine+1)}, - result: []string{strings.Repeat("A", maxCommandLine)}, - }, - { - original: []string{strings.Repeat("A", maxCommandLine-1), "", "B"}, - result: []string{strings.Repeat("A", maxCommandLine-1), "B"}, - }, - { - original: []string{strings.Repeat("A", maxCommandLine-1), "BCD"}, - result: []string{strings.Repeat("A", maxCommandLine-1), "B"}, - }, - } - - for _, test := range tests { - assert.Equal(t, test.result, truncateCmdline(test.original)) - } -} diff --git a/pkg/collector/corechecks/servicediscovery/language/language.go b/pkg/collector/corechecks/servicediscovery/language/language.go index 63e8378c116a9..dd4b224f9bc47 100644 --- a/pkg/collector/corechecks/servicediscovery/language/language.go +++ b/pkg/collector/corechecks/servicediscovery/language/language.go @@ -9,6 +9,8 @@ package language import ( + "path/filepath" + "github.com/DataDog/datadog-agent/pkg/languagedetection" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" @@ -61,7 +63,7 @@ type ProcessInfo struct { } // FindInArgs tries to detect the language only using the provided command line arguments. 
-func FindInArgs(args []string) Language { +func FindInArgs(exe string, args []string) Language { // empty slice passed in if len(args) == 0 { return "" @@ -71,7 +73,7 @@ func FindInArgs(args []string) Language { // Pid doesn't matter since sysprobeConfig is nil Pid: 0, Cmdline: args, - Comm: args[0], + Comm: filepath.Base(exe), }}, nil) if len(langs) == 0 { return "" diff --git a/pkg/collector/corechecks/servicediscovery/language/language_nix_test.go b/pkg/collector/corechecks/servicediscovery/language/language_nix_test.go index 34de49f0d7ae1..fcedb451399a2 100644 --- a/pkg/collector/corechecks/servicediscovery/language/language_nix_test.go +++ b/pkg/collector/corechecks/servicediscovery/language/language_nix_test.go @@ -21,33 +21,44 @@ import ( func Test_findInArgs(t *testing.T) { data := []struct { name string + exe string args []string lang Language }{ { name: "empty", + exe: "", args: nil, lang: "", }, { name: "simple_java", + exe: "", args: strings.Split("java -jar MyApp.jar MyApp", " "), lang: Java, }, { name: "path_java", + exe: "", args: strings.Split("/usr/bin/java -jar MyApp.jar MyApp", " "), lang: Java, }, { name: "just_command", + exe: "", args: strings.Split("./mybinary arg1 arg2 arg3", " "), lang: "", }, + { + name: "exe fallback", + exe: "/usr/local/bin/python3.10", + args: strings.Split("gunicorn: worker [foo]", " "), + lang: Python, + }, } for _, d := range data { t.Run(d.name, func(t *testing.T) { - result := FindInArgs(d.args) + result := FindInArgs(d.exe, d.args) if result != d.lang { t.Errorf("got %v, want %v", result, d.lang) } diff --git a/pkg/collector/corechecks/servicediscovery/model/model.go b/pkg/collector/corechecks/servicediscovery/model/model.go index 2c27e9e1379c9..2c563860fcfed 100644 --- a/pkg/collector/corechecks/servicediscovery/model/model.go +++ b/pkg/collector/corechecks/servicediscovery/model/model.go @@ -14,6 +14,9 @@ type Service struct { Ports []uint16 `json:"ports"` APMInstrumentation string `json:"apm_instrumentation"` 
Language string `json:"language"` + RSS uint64 `json:"rss"` + CommandLine []string `json:"cmdline"` + StartTimeSecs uint64 `json:"start_time"` } // ServicesResponse is the response for the system-probe /discovery/services endpoint. diff --git a/pkg/collector/corechecks/servicediscovery/module/cmdline.go b/pkg/collector/corechecks/servicediscovery/module/cmdline.go new file mode 100644 index 0000000000000..2e54c714a8e61 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/cmdline.go @@ -0,0 +1,65 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package module + +import "github.com/DataDog/datadog-agent/pkg/process/procutil" + +const ( + maxCommandLine = 200 +) + +// countAndAddElements is a helper for truncateCmdline used to be able to +// pre-calculate the size of the output slice to improve performance. +func countAndAddElements(cmdline []string, inElements int) (int, []string) { + var out []string + + if inElements != 0 { + out = make([]string, 0, inElements) + } + + elements := 0 + total := 0 + for _, arg := range cmdline { + if total >= maxCommandLine { + break + } + + this := len(arg) + if this == 0 { + // To avoid ending up with a large array with empty strings + continue + } + + if total+this > maxCommandLine { + this = maxCommandLine - total + } + + if inElements != 0 { + out = append(out, arg[:this]) + } + + elements++ + total += this + } + + return elements, out +} + +// truncateCmdline truncates the command line length to maxCommandLine. 
+func truncateCmdline(cmdline []string) []string { + elements, _ := countAndAddElements(cmdline, 0) + _, out := countAndAddElements(cmdline, elements) + return out +} + +// sanitizeCmdLine scubs the command line of sensitive data and truncates it +// to a fixed size to limit memory usage. +func sanitizeCmdLine(scrubber *procutil.DataScrubber, cmdline []string) []string { + cmdline, _ = scrubber.ScrubCommand(cmdline) + return truncateCmdline(cmdline) +} diff --git a/pkg/collector/corechecks/servicediscovery/module/cmdline_test.go b/pkg/collector/corechecks/servicediscovery/module/cmdline_test.go new file mode 100644 index 0000000000000..452c497891a7d --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/cmdline_test.go @@ -0,0 +1,57 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux + +package module + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTruncateCmdline(t *testing.T) { + type testData struct { + original []string + result []string + } + + tests := []testData{ + { + original: []string{}, + result: nil, + }, + { + original: []string{"a", "b", "", "c", "d"}, + result: []string{"a", "b", "c", "d"}, + }, + { + original: []string{"x", strings.Repeat("A", maxCommandLine-1)}, + result: []string{"x", strings.Repeat("A", maxCommandLine-1)}, + }, + { + original: []string{strings.Repeat("A", maxCommandLine), "B"}, + result: []string{strings.Repeat("A", maxCommandLine)}, + }, + { + original: []string{strings.Repeat("A", maxCommandLine+1)}, + result: []string{strings.Repeat("A", maxCommandLine)}, + }, + { + original: []string{strings.Repeat("A", maxCommandLine-1), "", "B"}, + result: []string{strings.Repeat("A", maxCommandLine-1), "B"}, + }, + { + original: []string{strings.Repeat("A", maxCommandLine-1), "BCD"}, + result: []string{strings.Repeat("A", maxCommandLine-1), "B"}, + }, + } + + for _, test := range tests { + assert.Equal(t, test.result, truncateCmdline(test.original)) + } +} diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 3b5602be0b4f2..690b74e5e6e7d 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -6,25 +6,29 @@ package module import ( + "bufio" + "errors" "fmt" + "io" "net/http" + "os" + "path/filepath" "strconv" "strings" + "sync" - "github.com/prometheus/procfs" "github.com/shirou/gopsutil/v3/process" "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" + "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -43,22 +47,30 @@ type serviceInfo struct { nameFromDDService bool language language.Language apmInstrumentation apm.Instrumentation + cmdLine []string + startTimeSecs uint64 } // discovery is an implementation of the Module interface for the discovery module. type discovery struct { + mux *sync.RWMutex // cache maps pids to data that should be cached between calls to the endpoint. cache map[int32]*serviceInfo // privilegedDetector is used to detect the language of a process. privilegedDetector privileged.LanguageDetector + + // scrubber is used to remove potentially sensitive data from the command line + scrubber *procutil.DataScrubber } // NewDiscoveryModule creates a new discovery system probe module. -func NewDiscoveryModule(*sysconfigtypes.Config, workloadmeta.Component, telemetry.Component) (module.Module, error) { +func NewDiscoveryModule(*sysconfigtypes.Config, module.FactoryDependencies) (module.Module, error) { return &discovery{ + mux: &sync.RWMutex{}, cache: make(map[int32]*serviceInfo), privilegedDetector: privileged.NewLanguageDetector(), + scrubber: procutil.NewDefaultDataScrubber(), }, nil } @@ -70,12 +82,14 @@ func (s *discovery) GetStats() map[string]interface{} { // Register registers the discovery module with the provided HTTP mux. 
func (s *discovery) Register(httpMux *module.Router) error { httpMux.HandleFunc("/status", s.handleStatusEndpoint) - httpMux.HandleFunc(pathServices, s.handleServices) + httpMux.HandleFunc(pathServices, utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, s.handleServices)) return nil } // Close cleans resources used by the discovery module. func (s *discovery) Close() { + s.mux.Lock() + defer s.mux.Unlock() clear(s.cache) } @@ -101,24 +115,29 @@ func (s *discovery) handleServices(w http.ResponseWriter, _ *http.Request) { utils.WriteAsJSON(w, resp) } -// getSockets get a list of socket inode numbers opened by a process. Based on -// snapshotBoundSockets() in -// pkg/security/security_profile/activity_tree/process_node_snapshot.go. The -// socket inode information from /proc/../fd is needed to map the connection -// from the net/tcp (and similar) files to actual ports. -func getSockets(p *process.Process) ([]uint64, error) { - FDs, err := p.OpenFiles() +const prefix = "socket:[" + +// getSockets get a list of socket inode numbers opened by a process +func getSockets(pid int32) ([]uint64, error) { + statPath := kernel.HostProc(fmt.Sprintf("%d/fd", pid)) + d, err := os.Open(statPath) if err != nil { return nil, err } + defer d.Close() + fnames, err := d.Readdirnames(-1) - // sockets have the following pattern "socket:[inode]" + if err != nil { + return nil, err + } var sockets []uint64 - for _, fd := range FDs { - const prefix = "socket:[" - if strings.HasPrefix(fd.Path, prefix) { - inodeStr := strings.TrimPrefix(fd.Path[:len(fd.Path)-1], prefix) - sock, err := strconv.ParseUint(inodeStr, 10, 64) + for _, fd := range fnames { + fullPath, err := os.Readlink(filepath.Join(statPath, fd)) + if err != nil { + continue + } + if strings.HasPrefix(fullPath, prefix) { + sock, err := strconv.ParseUint(fullPath[len(prefix):len(fullPath)-1], 10, 64) if err != nil { continue } @@ -149,51 +168,117 @@ const ( udpListen = tcpClose ) -// addSockets adds only listening 
sockets to a map to be used for later looksups. -func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, state uint64) { - for _, sock := range sockets { - if sock.St != state { +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + // Taken from net_ip_socket.go from github.com/prometheus/procfs. + readLimit = 4294967296 // Byte -> 4 GiB +) + +var ( + errInvalidLine = errors.New("invalid line") + errInvalidState = errors.New("invalid state field") + errUnsupportedState = errors.New("unsupported state field") + errInvalidLocalIP = errors.New("invalid local ip format") + errInvalidLocalPort = errors.New("invalid local port format") + errInvalidInode = errors.New("invalid inode format") +) + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +// It returns the inode and local port of the socket if the line is valid. +// Based on parseNetIPSocketLine() in net_ip_socket.go from github.com/prometheus/procfs. 
+func parseNetIPSocketLine(fields []string, expectedState uint64) (uint64, uint16, error) { + if len(fields) < 10 { + return 0, 0, errInvalidLine + } + var localPort uint16 + var inode uint64 + + if state, err := strconv.ParseUint(fields[3], 16, 64); err != nil { + return 0, 0, errInvalidState + } else if state != expectedState { + return 0, 0, errUnsupportedState + } + + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return 0, 0, errInvalidLocalIP + } + localPortTemp, err := strconv.ParseUint(l[1], 16, 64) + if err != nil { + return 0, 0, errInvalidLocalPort + } + localPort = uint16(localPortTemp) + + if inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { + return 0, 0, errInvalidInode + } + + return inode, localPort, nil +} + +// newNetIPSocket reads the content of the provided file and returns a map of socket inodes to ports. +// Based on newNetIPSocket() in net_ip_socket.go from github.com/prometheus/procfs +func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netIPSocket := make(map[uint64]uint16) + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + inode, port, err := parseNetIPSocketLine(fields, expectedState) + if err != nil { continue } - sockMap[sock.Inode] = socketInfo{port: uint16(sock.LocalPort)} + netIPSocket[inode] = port } + if err := s.Err(); err != nil { + return nil, err + } + return netIPSocket, nil } // getNsInfo gets the list of open ports with socket inodes for all supported // protocols for the provided namespace. Based on snapshotBoundSockets() in // pkg/security/security_profile/activity_tree/process_node_snapshot.go. 
func getNsInfo(pid int) (*namespaceInfo, error) { - path := kernel.HostProc(fmt.Sprintf("%d", pid)) - proc, err := procfs.NewFS(path) - if err != nil { - log.Warnf("error while opening procfs (pid: %v): %s", pid, err) - return nil, err - } - - TCP, err := proc.NetTCP() + tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen) if err != nil { log.Debugf("couldn't snapshot TCP sockets: %v", err) } - UDP, err := proc.NetUDP() + udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen) if err != nil { log.Debugf("couldn't snapshot UDP sockets: %v", err) } - TCP6, err := proc.NetTCP6() + tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen) if err != nil { log.Debugf("couldn't snapshot TCP6 sockets: %v", err) } - UDP6, err := proc.NetUDP6() + udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen) if err != nil { log.Debugf("couldn't snapshot UDP6 sockets: %v", err) } - listeningSockets := make(map[uint64]socketInfo) - - addSockets(listeningSockets, TCP, tcpListen) - addSockets(listeningSockets, TCP6, tcpListen) - addSockets(listeningSockets, UDP, udpListen) - addSockets(listeningSockets, UDP6, udpListen) - + listeningSockets := make(map[uint64]socketInfo, len(tcp)+len(udp)+len(tcpv6)+len(udpv6)) + for _, mmap := range []map[uint64]uint16{tcp, udp, tcpv6, udpv6} { + for inode, info := range mmap { + listeningSockets[inode] = socketInfo{ + port: info, + } + } + } return &namespaceInfo{ listeningSockets: listeningSockets, }, nil @@ -219,11 +304,21 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) return nil, err } + exe, err := proc.Exe() + if err != nil { + return nil, err + } + + createTime, err := proc.CreateTime() + if err != nil { + return nil, err + } + contextMap := make(usm.DetectorContextMap) root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root") name, fromDDService := 
servicediscovery.GetServiceName(cmdline, envs, root, contextMap) - lang := language.FindInArgs(cmdline) + lang := language.FindInArgs(exe, cmdline) if lang == "" { lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid) } @@ -234,17 +329,58 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) language: lang, apmInstrumentation: apmInstrumentation, nameFromDDService: fromDDService, + cmdLine: sanitizeCmdLine(s.scrubber, cmdline), + startTimeSecs: uint64(createTime / 1000), }, nil } +// customNewProcess is the same implementation as process.NewProcess but without calling CreateTimeWithContext, which +// is not needed and costly for the discovery module. +func customNewProcess(pid int32) (*process.Process, error) { + p := &process.Process{ + Pid: pid, + } + + exists, err := process.PidExists(pid) + if err != nil { + return p, err + } + if !exists { + return p, process.ErrorProcessNotRunning + } + return p, nil +} + +// ignoreComms is a list of process names (matched against /proc/PID/comm) to +// never report as a service. Note that comm is limited to 16 characters. +var ignoreComms = map[string]struct{}{ + "sshd": {}, + "dhclient": {}, + "systemd": {}, + "systemd-resolved": {}, + "systemd-networkd": {}, + "datadog-agent": {}, + "livenessprobe": {}, + "docker-proxy": {}, +} + // getService gets information for a single service. 
func (s *discovery) getService(context parsingContext, pid int32) *model.Service { - proc, err := process.NewProcess(pid) + proc, err := customNewProcess(pid) if err != nil { return nil } - sockets, err := getSockets(proc) + comm, err := proc.Name() + if err != nil { + return nil + } + + if _, found := ignoreComms[comm]; found { + return nil + } + + sockets, err := getSockets(pid) if err != nil { return nil } @@ -289,8 +425,16 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service return nil } + rss, err := getRSS(proc) + if err != nil { + return nil + } + var info *serviceInfo - if cached, ok := s.cache[pid]; ok { + s.mux.RLock() + cached, ok := s.cache[pid] + s.mux.RUnlock() + if ok { info = cached } else { info, err = s.getServiceInfo(proc) @@ -298,7 +442,9 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service return nil } + s.mux.Lock() s.cache[pid] = info + s.mux.Unlock() } nameSource := "generated" @@ -313,6 +459,9 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service Ports: ports, APMInstrumentation: string(info.apmInstrumentation), Language: string(info.language), + RSS: rss, + CommandLine: info.cmdLine, + StartTimeSecs: info.startTimeSecs, } } @@ -320,6 +469,8 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service // shrink the map but should free memory for the service name strings referenced // from it. 
func (s *discovery) cleanCache(alivePids map[int32]struct{}) { + s.mux.Lock() + defer s.mux.Unlock() for pid := range s.cache { if _, alive := alivePids[pid]; alive { continue diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 6806e878040ca..8028da0225c70 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -20,6 +20,7 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" "syscall" "testing" @@ -49,6 +50,7 @@ import ( fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/kernel" ) func setupDiscoveryModule(t *testing.T) string { @@ -227,6 +229,7 @@ func TestBasic(t *testing.T) { serviceMap := getServicesMap(t, url) for _, pid := range expectedPIDs { require.Contains(t, serviceMap[pid].Ports, uint16(expectedPorts[pid])) + assertStat(t, serviceMap[pid]) } } @@ -368,7 +371,7 @@ func buildFakeServer(t *testing.T) string { serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "fake_server") require.NoError(t, err) - for _, alias := range []string{"java", "node"} { + for _, alias := range []string{"java", "node", "sshd"} { makeAlias(t, alias, serverBin) } @@ -473,11 +476,95 @@ func TestAPMInstrumentationProvided(t *testing.T) { assert.Contains(collect, portMap, pid) assert.Equal(collect, string(test.language), portMap[pid].Language) assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) + assertStat(t, portMap[pid]) }, 30*time.Second, 100*time.Millisecond) }) } } +func assertStat(t assert.TestingT, svc model.Service) { + proc, err := process.NewProcess(int32(svc.PID)) + if !assert.NoError(t, err) { 
+ return + } + + meminfo, err := proc.MemoryInfo() + if !assert.NoError(t, err) { + return + } + + // Allow a 20% variation to avoid potential flakiness due to difference in + // time of sampling the RSS. + assert.InEpsilon(t, meminfo.RSS, svc.RSS, 0.20) + + createTimeMs, err := proc.CreateTime() + if !assert.NoError(t, err) { + return + } + + assert.Equal(t, uint64(createTimeMs/1000), svc.StartTimeSecs) +} + +func TestCommandLineSanitization(t *testing.T) { + serverDir := buildFakeServer(t) + url := setupDiscoveryModule(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { cancel() }) + + bin := filepath.Join(serverDir, "node") + + actualCommandLine := []string{bin, "--password", "secret", strings.Repeat("A", maxCommandLine*10)} + sanitizedCommandLine := []string{bin, "--password", "********", "placeholder"} + sanitizedCommandLine[3] = strings.Repeat("A", maxCommandLine-(len(bin)+len(sanitizedCommandLine[1])+len(sanitizedCommandLine[2]))) + + cmd := exec.CommandContext(ctx, bin, actualCommandLine[1:]...) + require.NoError(t, cmd.Start()) + + pid := cmd.Process.Pid + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + svcMap := getServicesMap(t, url) + assert.Contains(collect, svcMap, pid) + assert.Equal(collect, sanitizedCommandLine, svcMap[pid].CommandLine) + }, 30*time.Second, 100*time.Millisecond) +} + +func TestIgnore(t *testing.T) { + serverDir := buildFakeServer(t) + url := setupDiscoveryModule(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { cancel() }) + + badBin := filepath.Join(serverDir, "sshd") + badCmd := exec.CommandContext(ctx, badBin) + require.NoError(t, badCmd.Start()) + + // Also run a non-ignored server so that we can use it in the eventually + // loop below so that we don't have to wait a long time to be sure that we + // really ignored badBin and just didn't miss it because of a race. 
+ goodBin := filepath.Join(serverDir, "node") + goodCmd := exec.CommandContext(ctx, goodBin) + require.NoError(t, goodCmd.Start()) + + goodPid := goodCmd.Process.Pid + badPid := badCmd.Process.Pid + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + svcMap := getServicesMap(t, url) + assert.Contains(collect, svcMap, goodPid) + require.NotContains(t, svcMap, badPid) + }, 30*time.Second, 100*time.Millisecond) +} + +func TestIgnoreCommsLengths(t *testing.T) { + for comm := range ignoreComms { + // /proc/PID/comm is limited to 16 characters. + assert.LessOrEqual(t, len(comm), 16, "Process name %q too big", comm) + } +} + func TestNodeDocker(t *testing.T) { cert, key, err := testutil.GetCertsPaths() require.NoError(t, err) @@ -494,6 +581,7 @@ func TestNodeDocker(t *testing.T) { assert.Contains(collect, svcMap, pid) assert.Equal(collect, "nodejs-https-server", svcMap[pid].Name) assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) + assertStat(collect, svcMap[pid]) }, 30*time.Second, 100*time.Millisecond) } @@ -529,6 +617,7 @@ func TestAPMInstrumentationProvidedPython(t *testing.T) { assert.Contains(collect, portMap, pid) assert.Equal(collect, string(language.Python), portMap[pid].Language) assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) + assertStat(collect, portMap[pid]) }, 30*time.Second, 100*time.Millisecond) } @@ -634,7 +723,10 @@ func TestCache(t *testing.T) { core.MockBundle(), wmmock.MockModule(workloadmeta.NewParams()), ) - module, err := NewDiscoveryModule(nil, wmeta, nil) + deps := module.FactoryDependencies{ + WMeta: wmeta, + } + module, err := NewDiscoveryModule(nil, deps) require.NoError(t, err) discovery := module.(*discovery) @@ -694,3 +786,188 @@ func TestCache(t *testing.T) { discovery.Close() require.Empty(t, discovery.cache) } + +func BenchmarkOldProcess(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + process.NewProcess(int32(os.Getpid())) + } +} + +func 
BenchmarkNewProcess(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + customNewProcess(int32(os.Getpid())) + } +} + +func getSocketsOld(p *process.Process) ([]uint64, error) { + FDs, err := p.OpenFiles() + if err != nil { + return nil, err + } + + // sockets have the following pattern "socket:[inode]" + var sockets []uint64 + for _, fd := range FDs { + if strings.HasPrefix(fd.Path, prefix) { + inodeStr := strings.TrimPrefix(fd.Path[:len(fd.Path)-1], prefix) + sock, err := strconv.ParseUint(inodeStr, 10, 64) + if err != nil { + continue + } + sockets = append(sockets, sock) + } + } + + return sockets, nil +} + +const ( + numberFDs = 100 +) + +func createFilesAndSockets(tb testing.TB) { + listeningSockets := make([]net.Listener, 0, numberFDs) + tb.Cleanup(func() { + for _, l := range listeningSockets { + l.Close() + } + }) + for i := 0; i < numberFDs; i++ { + l, err := net.Listen("tcp", "localhost:0") + require.NoError(tb, err) + listeningSockets = append(listeningSockets, l) + } + regularFDs := make([]*os.File, 0, numberFDs) + tb.Cleanup(func() { + for _, f := range regularFDs { + f.Close() + } + }) + for i := 0; i < numberFDs; i++ { + f, err := os.CreateTemp("", "") + require.NoError(tb, err) + regularFDs = append(regularFDs, f) + } +} + +func TestGetSockets(t *testing.T) { + createFilesAndSockets(t) + p, err := process.NewProcess(int32(os.Getpid())) + require.NoError(t, err) + + sockets, err := getSockets(p.Pid) + require.NoError(t, err) + + sockets2, err := getSocketsOld(p) + require.NoError(t, err) + + require.Equal(t, sockets, sockets2) +} + +func BenchmarkGetSockets(b *testing.B) { + createFilesAndSockets(b) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + getSockets(int32(os.Getpid())) + } +} + +func BenchmarkOldGetSockets(b *testing.B) { + createFilesAndSockets(b) + p, err := process.NewProcess(int32(os.Getpid())) + require.NoError(b, err) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + 
getSocketsOld(p)
+	}
+}
+
+// addSockets adds only listening sockets to a map to be used for later lookups.
+func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, state uint64) {
+	for _, sock := range sockets {
+		if sock.St != state {
+			continue
+		}
+		sockMap[sock.Inode] = socketInfo{port: uint16(sock.LocalPort)}
+	}
+}
+
+func getNsInfoOld(pid int) (*namespaceInfo, error) {
+	path := kernel.HostProc(fmt.Sprintf("%d", pid))
+	proc, err := procfs.NewFS(path)
+	if err != nil {
+		return nil, err
+	}
+
+	TCP, _ := proc.NetTCP()
+	UDP, _ := proc.NetUDP()
+	TCP6, _ := proc.NetTCP6()
+	UDP6, _ := proc.NetUDP6()
+
+	listeningSockets := make(map[uint64]socketInfo)
+
+	addSockets(listeningSockets, TCP, tcpListen)
+	addSockets(listeningSockets, TCP6, tcpListen)
+	addSockets(listeningSockets, UDP, udpListen)
+	addSockets(listeningSockets, UDP6, udpListen)
+
+	return &namespaceInfo{
+		listeningSockets: listeningSockets,
+	}, nil
+}
+
+func TestGetNSInfo(t *testing.T) {
+	lTCP, err := net.Listen("tcp", "localhost:0")
+	require.NoError(t, err)
+	defer lTCP.Close()
+
+	res, err := getNsInfo(os.Getpid())
+	require.NoError(t, err)
+	resOld, err := getNsInfoOld(os.Getpid())
+	require.NoError(t, err)
+	require.Equal(t, res, resOld)
+}
+
+func BenchmarkGetNSInfo(b *testing.B) {
+	sockets := make([]net.Listener, 0)
+	for i := 0; i < 100; i++ {
+		l, err := net.Listen("tcp", "localhost:0")
+		require.NoError(b, err)
+		sockets = append(sockets, l)
+	}
+	defer func() {
+		for _, l := range sockets {
+			l.Close()
+		}
+	}()
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		getNsInfo(os.Getpid())
+	}
+}
+
+func BenchmarkGetNSInfoOld(b *testing.B) {
+	sockets := make([]net.Listener, 0)
+	for i := 0; i < 100; i++ {
+		l, err := net.Listen("tcp", "localhost:0")
+		require.NoError(b, err)
+		sockets = append(sockets, l)
+	}
+	defer func() {
+		for _, l := range sockets {
+			l.Close()
+		}
+	}()
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < 
b.N; i++ { + getNsInfoOld(os.Getpid()) + } +} diff --git a/pkg/collector/corechecks/servicediscovery/module/stat.go b/pkg/collector/corechecks/servicediscovery/module/stat.go new file mode 100644 index 0000000000000..4e12e840d741c --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/stat.go @@ -0,0 +1,49 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package module + +import ( + "errors" + "os" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/process" + + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +// pageSize stores the page size of the system in bytes, since the values in +// statm are in pages. +var pageSize = uint64(os.Getpagesize()) + +// getRSS returns the RSS for the process, in bytes. Compare MemoryInfo() in +// gopsutil which does the same thing but which parses several other fields +// which we're not interested in. +func getRSS(proc *process.Process) (uint64, error) { + statmPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "statm") + + // This file is very small so just read it fully. + contents, err := os.ReadFile(statmPath) + if err != nil { + return 0, err + } + + // See proc(5) for a description of the format of statm and the fields. 
+ fields := strings.Split(string(contents), " ") + if len(fields) < 6 { + return 0, errors.New("invalid statm") + } + + rssPages, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return 0, err + } + + return rssPages * pageSize, nil +} diff --git a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore index 0463fd64b64eb..16df22f27b688 100644 --- a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore +++ b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore @@ -1,3 +1,4 @@ fake_server java node +sshd diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index 9578aa783ea47..3e8bf7eb7dab6 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -34,23 +35,11 @@ const ( ) type serviceInfo struct { - process processInfo meta ServiceMetadata + service model.Service LastHeartbeat time.Time } -type procStat struct { - StartTime uint64 -} - -type processInfo struct { - PID int - CmdLine []string - Env map[string]string - Stat procStat - Ports []uint16 -} - type serviceEvents struct { start []serviceInfo stop []serviceInfo @@ -58,8 +47,6 @@ type serviceEvents struct { } type discoveredServices struct { - aliveProcsCount int - ignoreProcs map[int]bool potentials map[int]*serviceInfo 
runningServices map[int]*serviceInfo @@ -162,8 +149,7 @@ func (c *Check) Run() error { return err } - log.Debugf("aliveProcs: %d | ignoreProcs: %d | runningServices: %d | potentials: %d", - disc.aliveProcsCount, + log.Debugf("ignoreProcs: %d | runningServices: %d | potentials: %d", len(disc.ignoreProcs), len(disc.runningServices), len(disc.potentials), @@ -179,7 +165,7 @@ func (c *Check) Run() error { continue } for _, svc := range svcs { - if c.sentRepeatedEventPIDs[svc.process.PID] { + if c.sentRepeatedEventPIDs[svc.service.PID] { continue } err := fmt.Errorf("found repeated service name: %s", svc.meta.Name) @@ -189,7 +175,7 @@ func (c *Check) Run() error { svc: &svc.meta, }) // track the PID, so we don't increase this counter in every run of the check. - c.sentRepeatedEventPIDs[svc.process.PID] = true + c.sentRepeatedEventPIDs[svc.service.PID] = true } } @@ -213,9 +199,9 @@ func (c *Check) Run() error { continue } eventsByName.addStop(p) - if c.sentRepeatedEventPIDs[p.process.PID] { + if c.sentRepeatedEventPIDs[p.service.PID] { // delete this process from the map, so we track it if the PID gets reused - delete(c.sentRepeatedEventPIDs, p.process.PID) + delete(c.sentRepeatedEventPIDs, p.service.PID) } } diff --git a/pkg/collector/corechecks/servicediscovery/usm/jboss.go b/pkg/collector/corechecks/servicediscovery/usm/jboss.go index f54a28dc9db60..242da6b72b3f6 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/jboss.go +++ b/pkg/collector/corechecks/servicediscovery/usm/jboss.go @@ -160,8 +160,13 @@ func (j jbossExtractor) customExtractWarContextRoot(warFS fs.FS) (string, bool) } defer file.Close() + reader, err := SizeVerifiedReader(file) + if err != nil { + log.Debugf("jboss: ignoring %q: %v", jbossWebXMLFileWebInf, err) + return "", false + } var jwx jbossWebXML - if xml.NewDecoder(file).Decode(&jwx) != nil || len(jwx.ContextRoot) == 0 { + if xml.NewDecoder(reader).Decode(&jwx) != nil || len(jwx.ContextRoot) == 0 { return "", false } return 
jwx.ContextRoot, true @@ -213,8 +218,13 @@ func jbossDomainFindDeployments(basePathFs fs.FS, configFile string, serverName return nil, err } defer file.Close() + reader, err := SizeVerifiedReader(file) + if err != nil { + log.Debugf("jboss: ignoring %q: %v", jbossWebXMLFileWebInf, err) + return nil, err + } var descriptor jbossDomainXML - err = xml.NewDecoder(file).Decode(&descriptor) + err = xml.NewDecoder(reader).Decode(&descriptor) if err != nil { return nil, err } @@ -257,8 +267,12 @@ func jbossStandaloneFindDeployments(basePathFs fs.FS, configFile string) ([]jbos return nil, err } defer file.Close() + reader, err := SizeVerifiedReader(file) + if err != nil { + return nil, err + } var descriptor jbossStandaloneXML - err = xml.NewDecoder(file).Decode(&descriptor) + err = xml.NewDecoder(reader).Decode(&descriptor) if err != nil { return nil, err } @@ -289,7 +303,11 @@ func jbossFindServerGroup(domainFs fs.FS, serverName string) (string, bool, erro return "", false, err } defer file.Close() - decoder := xml.NewDecoder(file) + reader, err := SizeVerifiedReader(file) + if err != nil { + return "", false, err + } + decoder := xml.NewDecoder(reader) var decoded jbossHostXML err = decoder.Decode(&decoded) if err != nil || len(decoded.Servers) == 0 { diff --git a/pkg/collector/corechecks/servicediscovery/usm/jee.go b/pkg/collector/corechecks/servicediscovery/usm/jee.go index 48a822508a71e..94d1c3d7f1218 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/jee.go +++ b/pkg/collector/corechecks/servicediscovery/usm/jee.go @@ -105,11 +105,15 @@ type jeeExtractor struct { // extractContextRootFromApplicationXML parses a standard application.xml file extracting // mount points for web application (aka context roots). 
func extractContextRootFromApplicationXML(fs fs.FS) ([]string, error) { - reader, err := fs.Open(applicationXMLPath) + file, err := fs.Open(applicationXMLPath) + if err != nil { + return nil, err + } + defer file.Close() + reader, err := SizeVerifiedReader(file) if err != nil { return nil, err } - defer reader.Close() var a applicationXML err = xml.NewDecoder(reader).Decode(&a) if err != nil { @@ -220,6 +224,17 @@ func vfsAndTypeFromAppPath(deployment *jeeDeployment, filesystem fs.SubFS) (*fil if err != nil { return nil, dt, err } + + // Re-stat after opening to avoid races with attributes changing before + // previous stat and open. + fi, err = f.Stat() + if err != nil { + return nil, dt, err + } + if !fi.Mode().IsRegular() { + return nil, dt, err + } + r, err := zip.NewReader(f.(io.ReaderAt), fi.Size()) if err != nil { _ = f.Close() diff --git a/pkg/collector/corechecks/servicediscovery/usm/laravel.go b/pkg/collector/corechecks/servicediscovery/usm/laravel.go index 1ff0b341949e1..ee2b32107375a 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/laravel.go +++ b/pkg/collector/corechecks/servicediscovery/usm/laravel.go @@ -12,6 +12,8 @@ import ( "path" "regexp" "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" ) type laravelParser struct { @@ -47,7 +49,13 @@ func getFirstMatchFromRegex(pattern string, content []byte) (string, bool) { func trimPrefixFromLine(fs fs.SubFS, file string, prefix string) (string, bool) { if f, err := fs.Open(file); err == nil { defer f.Close() - scn := bufio.NewScanner(f) + reader, err := SizeVerifiedReader(f) + if err != nil { + log.Debugf("laravel: ignoring %q: %v", file, err) + return "", false + } + + scn := bufio.NewScanner(reader) for scn.Scan() { if value, ok := strings.CutPrefix(scn.Text(), prefix); ok { return value, true @@ -76,7 +84,12 @@ func (l laravelParser) getLaravelAppNameFromConfig(dir string) (string, bool) { if l.ctx.fs != nil { if f, err := l.ctx.fs.Open(configFileName); err == nil { defer f.Close() 
- configFileContent, err := io.ReadAll(f) + reader, err := SizeVerifiedReader(f) + if err != nil { + log.Debugf("laravel: ignoring %q: %v", configFileName, err) + return "", false + } + configFileContent, err := io.ReadAll(reader) if err != nil { return "", false } diff --git a/pkg/collector/corechecks/servicediscovery/usm/nodejs.go b/pkg/collector/corechecks/servicediscovery/usm/nodejs.go index 1dab090692a67..2a611fb907c76 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/nodejs.go +++ b/pkg/collector/corechecks/servicediscovery/usm/nodejs.go @@ -112,16 +112,12 @@ func (n nodeDetector) maybeExtractServiceName(filename string) (string, bool) { return "", false } defer file.Close() - ok, err := canSafelyParse(file) + reader, err := SizeVerifiedReader(file) if err != nil { - //file not accessible or don't exist. Continuing searching up - return "", false - } - if !ok { - log.Debugf("skipping package.js (%q) because too large", filename) + log.Debugf("skipping package.js (%q). Err: %v", filename, err) return "", true // stops here } - bytes, err := io.ReadAll(file) + bytes, err := io.ReadAll(reader) if err != nil { log.Debugf("unable to read a package.js file (%q). 
Err: %v", filename, err)
 		return "", true
diff --git a/pkg/collector/corechecks/servicediscovery/usm/php.go b/pkg/collector/corechecks/servicediscovery/usm/php.go
index dcc9e16ce41a6..764d9f9f5b826 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/php.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/php.go
@@ -32,19 +32,18 @@ func (p phpDetector) detect(args []string) (ServiceMetadata, bool) {
 		}
 	}
 	prevArgIsFlag := false
-	shouldSkipArg := false
 	for _, arg := range args {
-		hasFlagPrefix := shouldSkipArg || strings.HasPrefix(arg, "-")
-		includesAssignment := shouldSkipArg || strings.ContainsRune(arg, '=') || strings.HasPrefix(arg, "-d")
-		shouldSkipArg := prevArgIsFlag || hasFlagPrefix || includesAssignment
+		hasFlagPrefix := strings.HasPrefix(arg, "-")
 
-		if !shouldSkipArg {
+		// If the previous argument was a flag, or if the current arg is a flag, skip the argument. Otherwise, process it.
+		if !prevArgIsFlag && !hasFlagPrefix {
 			basePath := removeFilePath(arg)
 			if isRuneLetterAt(basePath, 0) && basePath == artisanConsole {
 				return NewServiceMetadata(newLaravelParser(p.ctx).GetLaravelAppName(arg)), true
 			}
 		}
 
+		includesAssignment := strings.ContainsRune(arg, '=')
 		prevArgIsFlag = hasFlagPrefix && !includesAssignment
 	}
 
diff --git a/pkg/collector/corechecks/servicediscovery/usm/php_test.go b/pkg/collector/corechecks/servicediscovery/usm/php_test.go
index 16260dc33b139..dcf9c61921b90 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/php_test.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/php_test.go
@@ -1,11 +1,12 @@
 // Unless explicitly stated otherwise all files in this repository are licensed
 // under the Apache License Version 2.0.
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
package usm import ( + "github.com/stretchr/testify/require" "testing" ) @@ -30,6 +31,16 @@ func TestServiceNameFromCLI(t *testing.T) { args: []string{"php", "-d", "datadog.service=service_name", "server.php"}, expected: "service_name", }, + { + name: "artisan command with -x flag", + args: []string{"php", "-x", "a", "artisan", "serve"}, + expected: "laravel", + }, + { + name: "artisan command with -x flag and assignment", + args: []string{"php", "-x=a", "artisan", "serve"}, + expected: "laravel", + }, { name: "Nothing found", args: []string{"php", "server.php"}, @@ -41,16 +52,10 @@ func TestServiceNameFromCLI(t *testing.T) { t.Run(tt.name, func(t *testing.T) { value, ok := instance.detect(tt.args) if len(tt.expected) > 0 { - if !ok { - t.Errorf("expected ok to be true, got false") - } - if value.Name != tt.expected { - t.Errorf("expected %s, got %s", tt.expected, value.Name) - } + require.True(t, ok) + require.Equal(t, tt.expected, value.Name) } else { - if ok { - t.Errorf("expected ok to be false, got true") - } + require.False(t, ok) } }) } diff --git a/pkg/collector/corechecks/servicediscovery/usm/python.go b/pkg/collector/corechecks/servicediscovery/usm/python.go index c09f85a73886f..ed55e63f7f16f 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/python.go +++ b/pkg/collector/corechecks/servicediscovery/usm/python.go @@ -125,6 +125,9 @@ func (g gunicornDetector) detect(args []string) (ServiceMetadata, bool) { } if name, ok := extractGunicornNameFrom(args); ok { + // gunicorn replaces the cmdline with something like "gunicorn: master + // [package]", so strip out the square brackets. 
+	name = strings.Trim(name, "[]")
 		return NewServiceMetadata(name), true
 	}
 	return NewServiceMetadata("gunicorn"), true
diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go
index 9dd9b9fa07d0c..f8bed0ad59541 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/service.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/service.go
@@ -7,6 +7,9 @@
 package usm
 
 import (
+	"errors"
+	"fmt"
+	"io"
 	"io/fs"
 	"os"
 	"path"
@@ -122,13 +125,28 @@ func abs(p string, cwd string) string {
 	return path.Join(cwd, p)
 }
 
-// canSafelyParse determines if a file's size is less than the maximum allowed to prevent OOM when parsing.
-func canSafelyParse(file fs.File) (bool, error) {
+// SizeVerifiedReader returns a reader for the file after ensuring that the file
+// is a regular file and that the size that can be read from the reader will not
+// exceed a pre-defined safety limit to control memory usage.
+func SizeVerifiedReader(file fs.File) (io.Reader, error) {
 	fi, err := file.Stat()
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	return fi.Size() <= maxParseFileSize, nil
+
+	// Don't try to read device files, etc.
+	if !fi.Mode().IsRegular() {
+		return nil, errors.New("not a regular file")
+	}
+
+	size := fi.Size()
+	if size > maxParseFileSize {
+		return nil, fmt.Errorf("file too large (%d bytes)", size)
+	}
+
+	// Additionally limit the reader to avoid surprises if the file size changes
+	// while reading it.
+ return io.LimitReader(file, min(size, maxParseFileSize)), nil } // List of binaries that usually have additional process context of what's running diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go index 6fd7e4ad3f9b1..a0403966e141b 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go @@ -16,8 +16,9 @@ import ( "runtime" "testing" - "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" ) const ( @@ -536,6 +537,24 @@ func TestExtractServiceMetadata(t *testing.T) { envs: map[string]string{"WSGI_APP": "test:app"}, expectedServiceTag: "test", }, + { + name: "gunicorn with replaced cmdline with colon", + cmdline: []string{ + "gunicorn:", + "master", + "[domains.foo.apps.bar:create_server()]", + }, + expectedServiceTag: "domains.foo.apps.bar", + }, + { + name: "gunicorn with replaced cmdline", + cmdline: []string{ + "gunicorn:", + "master", + "[mcservice]", + }, + expectedServiceTag: "mcservice", + }, } for _, tt := range tests { diff --git a/pkg/collector/corechecks/servicediscovery/usm/spring.go b/pkg/collector/corechecks/servicediscovery/usm/spring.go index 4dab88aeaf008..73a6048758ebf 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/spring.go +++ b/pkg/collector/corechecks/servicediscovery/usm/spring.go @@ -185,7 +185,11 @@ func (s springBootParser) newPropertySourceFromFile(filename string) (props.Prop if err != nil { return nil, err } - return newPropertySourceFromStream(f, filename, uint64(fi.Size())) + reader, err := SizeVerifiedReader(f) + if err != nil { + return nil, err + } + return newPropertySourceFromStream(reader, filename, uint64(fi.Size())) } // longestPathPrefix extracts the longest path's portion that's not a pattern (i.e. 
/test/**/*.xml will return /test/) @@ -299,6 +303,9 @@ func (s springBootParser) GetSpringBootAppName(jarname string) (string, bool) { if err != nil { return "", false } + if !fi.Mode().IsRegular() { + return "", false + } reader, err := zip.NewReader(file.(io.ReaderAt), fi.Size()) if err != nil { return "", false diff --git a/pkg/collector/corechecks/servicediscovery/usm/tomcat.go b/pkg/collector/corechecks/servicediscovery/usm/tomcat.go index 0271bd6772010..d9877cf3832d0 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/tomcat.go +++ b/pkg/collector/corechecks/servicediscovery/usm/tomcat.go @@ -133,8 +133,13 @@ func (te tomcatExtractor) parseServerXML(domainHome string) *tomcatServerXML { return nil } defer file.Close() + reader, err := SizeVerifiedReader(file) + if err != nil { + log.Debugf("Invalid tomcat server.xml (%q). Err %v", xmlFilePath, err) + return nil + } var serverXML tomcatServerXML - err = xml.NewDecoder(file).Decode(&serverXML) + err = xml.NewDecoder(reader).Decode(&serverXML) if err != nil { log.Debugf("Unable to parse tomcat server.xml (%q). Err: %v", xmlFilePath, err) return nil diff --git a/pkg/collector/corechecks/servicediscovery/usm/weblogic.go b/pkg/collector/corechecks/servicediscovery/usm/weblogic.go index fd56df2eba14a..17c70dcfa1999 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/weblogic.go +++ b/pkg/collector/corechecks/servicediscovery/usm/weblogic.go @@ -63,12 +63,13 @@ func (we weblogicExtractor) findDeployedApps(domainHome string) ([]jeeDeployment return nil, false } defer serverConfigFile.Close() - if ok, err := canSafelyParse(serverConfigFile); !ok { + reader, err := SizeVerifiedReader(serverConfigFile) + if err != nil { log.Debugf("weblogic: config.xml looks too big. 
Err: %v", err) return nil, false } var deployInfos weblogicDeploymentInfo - err = xml.NewDecoder(serverConfigFile).Decode(&deployInfos) + err = xml.NewDecoder(reader).Decode(&deployInfos) if err != nil { log.Debugf("weblogic: cannot parse config.xml. Err: %v", err) diff --git a/pkg/collector/corechecks/servicediscovery/usm/websphere.go b/pkg/collector/corechecks/servicediscovery/usm/websphere.go index 52f58931703bc..592c525ef0526 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/websphere.go +++ b/pkg/collector/corechecks/servicediscovery/usm/websphere.go @@ -52,11 +52,12 @@ func isApplicationDeployed(fs fs.FS, descriptorPath string, nodeName string, ser return false, err } defer file.Close() - if ok, _ := canSafelyParse(file); !ok { + reader, err := SizeVerifiedReader(file) + if err != nil { return false, err } var appDeployment websphereAppDeployment - err = xml.NewDecoder(file).Decode(&appDeployment) + err = xml.NewDecoder(reader).Decode(&appDeployment) if err != nil { return false, err } diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go index 07f539e953465..a97cecda5f899 100644 --- a/pkg/config/remote/client/client.go +++ b/pkg/config/remote/client/client.go @@ -74,8 +74,8 @@ type Client struct { // Elements that can be changed during the execution of listeners // They are atomics so that they don't have to share the top-level mutex // when in use - updaterPackagesState *atomic.Value // []*pbgo.PackageState - cwsWorkloads *atomic.Value // []string + installerState *atomic.Value // []*pbgo.PackageState + cwsWorkloads *atomic.Value // []string } // Options describes the client options @@ -276,21 +276,21 @@ func newClient(cf ConfigFetcher, opts ...func(opts *Options)) (*Client, error) { cwsWorkloads := &atomic.Value{} cwsWorkloads.Store([]string{}) - updaterPackagesState := &atomic.Value{} - updaterPackagesState.Store([]*pbgo.PackageState{}) + installerState := &atomic.Value{} + 
installerState.Store([]*pbgo.PackageState{}) return &Client{ - Options: options, - ID: generateID(), - startupSync: sync.Once{}, - ctx: ctx, - closeFn: cloneFn, - cwsWorkloads: cwsWorkloads, - updaterPackagesState: updaterPackagesState, - state: repository, - backoffPolicy: backoffPolicy, - listeners: make(map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))), - configFetcher: cf, + Options: options, + ID: generateID(), + startupSync: sync.Once{}, + ctx: ctx, + closeFn: cloneFn, + cwsWorkloads: cwsWorkloads, + installerState: installerState, + state: repository, + backoffPolicy: backoffPolicy, + listeners: make(map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))), + configFetcher: cf, }, nil } @@ -356,9 +356,14 @@ func (c *Client) SetCWSWorkloads(workloads []string) { c.cwsWorkloads.Store(workloads) } -// SetUpdaterPackagesState sets the updater package state -func (c *Client) SetUpdaterPackagesState(packages []*pbgo.PackageState) { - c.updaterPackagesState.Store(packages) +// GetInstallerState gets the installer state +func (c *Client) GetInstallerState() []*pbgo.PackageState { + return c.installerState.Load().([]*pbgo.PackageState) +} + +// SetInstallerState sets the installer state +func (c *Client) SetInstallerState(packages []*pbgo.PackageState) { + c.installerState.Store(packages) } func (c *Client) startFn() { @@ -560,15 +565,15 @@ func (c *Client) newUpdateRequest() (*pbgo.ClientGetConfigsRequest, error) { switch c.Options.isUpdater { case true: - updaterPackagesState, ok := c.updaterPackagesState.Load().([]*pbgo.PackageState) + installerState, ok := c.installerState.Load().([]*pbgo.PackageState) if !ok { - return nil, errors.New("could not load updaterPackagesState") + return nil, errors.New("could not load installerState") } req.Client.IsUpdater = true req.Client.ClientUpdater = &pbgo.ClientUpdater{ Tags: c.Options.updaterTags, - Packages: 
updaterPackagesState, + Packages: installerState, } case false: cwsWorkloads, ok := c.cwsWorkloads.Load().([]string) diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 4567f25619f31..2cf2a4bdfb4b1 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -794,6 +794,13 @@ func InitConfig(config pkgconfigmodel.Config) { // Remote process collector config.BindEnvAndSetDefault("workloadmeta.local_process_collector.collection_interval", DefaultLocalProcessCollectorInterval) + // Tagger Component + // This is a temporary/transient flag used to slowly migrate to a new internal implementation of the tagger. + // If set to true, the tagger will store all entities in a 2-layered map, the first map is indexed by prefix, and the second one is indexed by id. + // If set to false, the tagger will use the default implementation by storing entities in a one-layer map from plain strings to Tag Entities. + // TODO: remove this config option when the migration is finalised. + config.BindEnvAndSetDefault("tagger.tagstore_use_composite_entity_id", false) + // SBOM configuration config.BindEnvAndSetDefault("sbom.enabled", false) bindEnvAndSetLogsConfigKeys(config, "sbom.") @@ -955,6 +962,7 @@ func InitConfig(config pkgconfigmodel.Config) { // Installer configuration config.BindEnvAndSetDefault("remote_updates", false) + config.BindEnvAndSetDefault("remote_policies", false) config.BindEnvAndSetDefault("installer.registry.url", "") config.BindEnvAndSetDefault("installer.registry.auth", "") config.BindEnv("fleet_policies_dir") @@ -1212,6 +1220,9 @@ func telemetry(config pkgconfigmodel.Setup) { // Agent Telemetry. It is experimental feature and is subject to change. 
// It should not be enabled unless prompted by Datadog Support config.BindEnvAndSetDefault("agent_telemetry.enabled", false) + config.SetKnown("agent_telemetry.additional_endpoints.*") + bindEnvAndSetLogsConfigKeys(config, "agent_telemetry.") + } func serializer(config pkgconfigmodel.Setup) { @@ -1483,6 +1494,8 @@ func logsagent(config pkgconfigmodel.Setup) { // Experimental auto multiline detection settings (these are subject to change until the feature is no longer experimental) config.BindEnvAndSetDefault("logs_config.experimental_auto_multi_line_detection", false) config.SetKnown("logs_config.auto_multi_line_detection_custom_samples") + config.BindEnvAndSetDefault("logs_config.auto_multi_line.enable_json_detection", true) + config.BindEnvAndSetDefault("logs_config.auto_multi_line.enable_datetime_detection", true) config.BindEnvAndSetDefault("logs_config.auto_multi_line.timestamp_detector_match_threshold", 0.5) config.BindEnvAndSetDefault("logs_config.auto_multi_line.tokenizer_max_input_bytes", 60) config.BindEnvAndSetDefault("logs_config.auto_multi_line.pattern_table_max_size", 20) diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index 20d72aed08ded..ffa2f283ae56d 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -131,4 +131,6 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { // CWS enforcement capabilities cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.enabled", true) cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.raw_syscall.enabled", false) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.exclude_binaries", []string{}) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.rule_source_allowed", []string{"file", "remote-config"}) } diff --git a/pkg/flare/archive_test.go b/pkg/flare/archive_test.go index 0e44bd71f5045..d58d877579119 100644 --- a/pkg/flare/archive_test.go +++ b/pkg/flare/archive_test.go @@ 
-111,7 +111,7 @@ func setupIPCAddress(t *testing.T, confMock model.Config, URL string) { func TestGetAgentTaggerList(t *testing.T) { tagMap := make(map[string]types.TaggerListEntity) - tagMap["random_entity_name"] = types.TaggerListEntity{ + tagMap["random_prefix://random_id"] = types.TaggerListEntity{ Tags: map[string][]string{ "docker_source_name": {"docker_image:custom-agent:latest", "image_name:custom-agent"}, }, @@ -131,7 +131,7 @@ func TestGetAgentTaggerList(t *testing.T) { content, err := getAgentTaggerList() require.NoError(t, err) - assert.Contains(t, string(content), "random_entity_name") + assert.Contains(t, string(content), "random_prefix://random_id") assert.Contains(t, string(content), "docker_source_name") assert.Contains(t, string(content), "docker_image:custom-agent:latest") assert.Contains(t, string(content), "image_name:custom-agent") diff --git a/pkg/fleet/daemon/daemon.go b/pkg/fleet/daemon/daemon.go index 0b3fc11dd711c..beeb8aafdfa60 100644 --- a/pkg/fleet/daemon/daemon.go +++ b/pkg/fleet/daemon/daemon.go @@ -29,6 +29,7 @@ import ( installerErrors "github.com/DataDog/datadog-agent/pkg/fleet/installer/errors" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" "github.com/DataDog/datadog-agent/pkg/fleet/internal/bootstrap" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,6 +39,8 @@ import ( const ( // gcInterval is the interval at which the GC will run gcInterval = 1 * time.Hour + // refreshStateInterval is the interval at which the state will be refreshed + refreshStateInterval = 5 * time.Minute ) // Daemon is the fleet daemon in charge of remote install, updates and configuration. 
@@ -53,6 +56,7 @@ type Daemon interface { GetPackage(pkg string, version string) (Package, error) GetState() (map[string]repository.State, error) + GetRemoteConfigState() []*pbgo.PackageState GetAPMInjectionStatus() (APMInjectionStatus, error) } @@ -60,12 +64,14 @@ type daemonImpl struct { m sync.Mutex stopChan chan struct{} - env *env.Env - installer installer.Installer - rc *remoteConfig - catalog catalog - requests chan remoteAPIRequest - requestsWG sync.WaitGroup + env *env.Env + installer installer.Installer + rc *remoteConfig + cdn *cdn.CDN + catalog catalog + requests chan remoteAPIRequest + requestsWG sync.WaitGroup + requestsState map[string]requestState } func newInstaller(env *env.Env, installerBin string) installer.Installer { @@ -93,12 +99,14 @@ func NewDaemon(rcFetcher client.ConfigFetcher, config config.Reader) (Daemon, er func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env) *daemonImpl { i := &daemonImpl{ - env: env, - rc: rc, - installer: installer, - requests: make(chan remoteAPIRequest, 32), - catalog: catalog{}, - stopChan: make(chan struct{}), + env: env, + rc: rc, + installer: installer, + cdn: cdn.New(env), + requests: make(chan remoteAPIRequest, 32), + catalog: catalog{}, + stopChan: make(chan struct{}), + requestsState: make(map[string]requestState), } i.refreshState(context.Background()) return i @@ -112,6 +120,14 @@ func (d *daemonImpl) GetState() (map[string]repository.State, error) { return d.installer.States() } +// GetRemoteConfigState returns the remote config state. +func (d *daemonImpl) GetRemoteConfigState() []*pbgo.PackageState { + d.m.Lock() + defer d.m.Unlock() + + return d.rc.GetState() +} + // GetAPMInjectionStatus returns the APM injection status. This is not done in the service // to avoid cross-contamination between the daemon and the installer. 
func (d *daemonImpl) GetAPMInjectionStatus() (status APMInjectionStatus, err error) { @@ -176,15 +192,23 @@ func (d *daemonImpl) Start(_ context.Context) error { d.m.Lock() defer d.m.Unlock() go func() { + gcTicker := time.NewTicker(gcInterval) + defer gcTicker.Stop() + refreshStateTicker := time.NewTicker(refreshStateInterval) + defer refreshStateTicker.Stop() for { select { - case <-time.After(gcInterval): + case <-gcTicker.C: d.m.Lock() err := d.installer.GarbageCollect(context.Background()) d.m.Unlock() if err != nil { log.Errorf("Daemon: could not run GC: %v", err) } + case <-refreshStateTicker.C: + d.m.Lock() + d.refreshState(context.Background()) + d.m.Unlock() case <-d.stopChan: return case request := <-d.requests: @@ -431,7 +455,22 @@ func setRequestDone(ctx context.Context, err error) { } } +func (d *daemonImpl) resolveAgentRemoteConfigVersion(ctx context.Context) (string, error) { + if !d.env.RemotePolicies { + return "", nil + } + config, err := d.cdn.Get(ctx) + if err != nil { + return "", fmt.Errorf("could not get agent cdn config: %w", err) + } + return config.Version, nil +} + func (d *daemonImpl) refreshState(ctx context.Context) { + request, ok := ctx.Value(requestStateKey).(*requestState) + if ok { + d.requestsState[request.Package] = *request + } state, err := d.installer.States() if err != nil { // TODO: we should report this error through RC in some way @@ -443,7 +482,11 @@ func (d *daemonImpl) refreshState(ctx context.Context) { log.Errorf("could not get installer config state: %v", err) return } - requestState, ok := ctx.Value(requestStateKey).(*requestState) + configVersion, err := d.resolveAgentRemoteConfigVersion(ctx) + if err != nil { + log.Errorf("could not get agent remote config version: %v", err) + } + var packages []*pbgo.PackageState for pkg, s := range state { p := &pbgo.PackageState{ @@ -456,6 +499,10 @@ func (d *daemonImpl) refreshState(ctx context.Context) { p.StableConfigVersion = cs.Stable p.ExperimentConfigVersion = 
cs.Experiment } + if pkg == "datadog-agent" { + p.RemoteConfigVersion = configVersion + } + requestState, ok := d.requestsState[pkg] if ok && pkg == requestState.Package { var taskErr *pbgo.TaskError if requestState.Err != nil { diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go index 0a06bbe1c8361..4e3d0941553ee 100644 --- a/pkg/fleet/daemon/daemon_test.go +++ b/pkg/fleet/daemon/daemon_test.go @@ -126,7 +126,11 @@ func (c *testRemoteConfigClient) Subscribe(product string, fn func(update map[st c.listeners[product] = append(c.listeners[product], client.Handler(fn)) } -func (c *testRemoteConfigClient) SetUpdaterPackagesState(_ []*pbgo.PackageState) { +func (c *testRemoteConfigClient) SetInstallerState(_ []*pbgo.PackageState) { +} + +func (c *testRemoteConfigClient) GetInstallerState() []*pbgo.PackageState { + return nil } func (c *testRemoteConfigClient) SubmitCatalog(catalog catalog) { @@ -284,11 +288,8 @@ func TestRemoteRequest(t *testing.T) { defer i.Stop() testStablePackage := Package{ - Name: "test-package", - Version: "0.0.1", - URL: "oci://example.com/test-package@sha256:2fa082d512a120a814e32ddb80454efce56595b5c84a37cc1a9f90cf9cc7ba85", - Platform: runtime.GOOS, - Arch: runtime.GOARCH, + Name: "test-package", + Version: "0.0.1", } testExperimentPackage := Package{ Name: "test-package", diff --git a/pkg/fleet/daemon/local_api.go b/pkg/fleet/daemon/local_api.go index d1481ccbc8ee0..5f4b58bcb9ea4 100644 --- a/pkg/fleet/daemon/local_api.go +++ b/pkg/fleet/daemon/local_api.go @@ -16,6 +16,7 @@ import ( "path/filepath" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" "github.com/gorilla/mux" @@ -31,6 +32,7 @@ type StatusResponse struct { Version string `json:"version"` Packages map[string]repository.State `json:"packages"` ApmInjectionStatus APMInjectionStatus 
`json:"apm_injection_status"` + RemoteConfigState []*pbgo.PackageState `json:"remote_config_state"` } // APMInjectionStatus contains the instrumentation status of the APM injection. @@ -134,6 +136,7 @@ func (l *localAPIImpl) status(w http.ResponseWriter, _ *http.Request) { Version: version.AgentVersion, Packages: packages, ApmInjectionStatus: apmStatus, + RemoteConfigState: l.daemon.GetRemoteConfigState(), } } diff --git a/pkg/fleet/daemon/local_api_test.go b/pkg/fleet/daemon/local_api_test.go index baba82ed18c3e..6017884168f43 100644 --- a/pkg/fleet/daemon/local_api_test.go +++ b/pkg/fleet/daemon/local_api_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/version" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -65,6 +66,11 @@ func (m *testDaemon) GetState() (map[string]repository.State, error) { return args.Get(0).(map[string]repository.State), args.Error(1) } +func (m *testDaemon) GetRemoteConfigState() []*pbgo.PackageState { + args := m.Called() + return args.Get(0).([]*pbgo.PackageState) +} + func (m *testDaemon) GetAPMInjectionStatus() (APMInjectionStatus, error) { args := m.Called() return args.Get(0).(APMInjectionStatus), args.Error(1) @@ -112,6 +118,7 @@ func TestAPIStatus(t *testing.T) { }, } api.i.On("GetState").Return(installerState, nil) + api.i.On("GetRemoteConfigState").Return([]*pbgo.PackageState(nil)) api.i.On("GetAPMInjectionStatus").Return(APMInjectionStatus{}, nil) resp, err := api.c.Status() diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index ed82e785ffe38..9509185899ccd 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -24,7 +24,8 @@ type remoteConfigClient interface { Start() Close() Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, 
state.ApplyStatus))) - SetUpdaterPackagesState(packages []*pbgo.PackageState) + GetInstallerState() []*pbgo.PackageState + SetInstallerState(packages []*pbgo.PackageState) } type remoteConfig struct { @@ -63,9 +64,14 @@ func (rc *remoteConfig) Close() { rc.client.Close() } -// SetState sets the state of the given package. +// GetState gets the state of the remote config client. +func (rc *remoteConfig) GetState() []*pbgo.PackageState { + return rc.client.GetInstallerState() +} + +// SetState sets the state of the remote config client. func (rc *remoteConfig) SetState(packages []*pbgo.PackageState) { - rc.client.SetUpdaterPackagesState(packages) + rc.client.SetInstallerState(packages) } // Package represents a downloadable package. diff --git a/pkg/fleet/env/env.go b/pkg/fleet/env/env.go index a8df613e0b0e4..780382c143c30 100644 --- a/pkg/fleet/env/env.go +++ b/pkg/fleet/env/env.go @@ -109,6 +109,7 @@ func FromConfig(config config.Reader) *Env { APIKey: utils.SanitizeAPIKey(config.GetString("api_key")), Site: config.GetString("site"), RemoteUpdates: config.GetBool("remote_updates"), + RemotePolicies: config.GetBool("remote_policies"), RegistryOverride: config.GetString("installer.registry.url"), RegistryAuthOverride: config.GetString("installer.registry.auth"), } @@ -126,6 +127,9 @@ func (e *Env) ToEnv() []string { if e.RemoteUpdates { env = append(env, envRemoteUpdates+"=true") } + if e.RemotePolicies { + env = append(env, envRemotePolicies+"=true") + } if e.RegistryOverride != "" { env = append(env, envRegistryURL+"="+e.RegistryOverride) } diff --git a/pkg/fleet/env/env_test.go b/pkg/fleet/env/env_test.go index cc12b9c0365ba..b4e86c46e3f93 100644 --- a/pkg/fleet/env/env_test.go +++ b/pkg/fleet/env/env_test.go @@ -41,6 +41,7 @@ func TestFromEnv(t *testing.T) { envAPIKey: "123456", envSite: "datadoghq.eu", envRemoteUpdates: "true", + envRemotePolicies: "true", envRegistryURL: "registry.example.com", envRegistryAuth: "auth", envRegistryURL + "_IMAGE": 
"another.registry.example.com", @@ -58,6 +59,7 @@ func TestFromEnv(t *testing.T) { APIKey: "123456", Site: "datadoghq.eu", RemoteUpdates: true, + RemotePolicies: true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", RegistryOverrideByImage: map[string]string{ @@ -166,6 +168,7 @@ func TestToEnv(t *testing.T) { APIKey: "123456", Site: "datadoghq.eu", RemoteUpdates: true, + RemotePolicies: true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", RegistryOverrideByImage: map[string]string{ @@ -194,6 +197,7 @@ func TestToEnv(t *testing.T) { "DD_API_KEY=123456", "DD_SITE=datadoghq.eu", "DD_REMOTE_UPDATES=true", + "DD_REMOTE_POLICIES=true", "DD_INSTALLER_REGISTRY_URL=registry.example.com", "DD_INSTALLER_REGISTRY_AUTH=auth", "DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:latest,java,ruby:1.2", diff --git a/pkg/kubestatemetrics/builder/builder.go b/pkg/kubestatemetrics/builder/builder.go index 779269a78798c..148318bc1d6fb 100644 --- a/pkg/kubestatemetrics/builder/builder.go +++ b/pkg/kubestatemetrics/builder/builder.go @@ -47,6 +47,9 @@ type Builder struct { metrics *watch.ListWatchMetrics resync time.Duration + + collectPodsFromKubelet bool + collectOnlyUnassignedPods bool } // New returns new Builder instance @@ -135,6 +138,20 @@ func (b *Builder) WithAllowAnnotations(l map[string][]string) { b.ksmBuilder.WithAllowAnnotations(l) } +// WithPodCollectionFromKubelet configures the builder to collect pods from the +// Kubelet instead of the API server. This has no effect if pod collection is +// disabled. +func (b *Builder) WithPodCollectionFromKubelet() { + b.collectPodsFromKubelet = true +} + +// WithUnassignedPodsCollection configures the builder to only collect pods that +// are not assigned to any node. This has no effect if pod collection is +// disabled. +func (b *Builder) WithUnassignedPodsCollection() { + b.collectOnlyUnassignedPods = true +} + // Build initializes and registers all enabled stores. // Returns metric writers. 
func (b *Builder) Build() metricsstore.MetricsWriterList { @@ -172,8 +189,16 @@ func GenerateStores[T any]( if b.namespaces.IsAllNamespaces() { store := store.NewMetricsStore(composedMetricGenFuncs, reflect.TypeOf(expectedType).String()) - listWatcher := listWatchFunc(client, corev1.NamespaceAll, b.fieldSelectorFilter) - b.startReflector(expectedType, store, listWatcher, useAPIServerCache) + + switch expectedType.(type) { + // Pods are handled differently because depending on the configuration + // they're collected from the API server or the Kubelet. + case *corev1.Pod: + handlePodCollection(b, store, client, listWatchFunc, corev1.NamespaceAll, useAPIServerCache) + default: + listWatcher := listWatchFunc(client, corev1.NamespaceAll, b.fieldSelectorFilter) + b.startReflector(expectedType, store, listWatcher, useAPIServerCache) + } return []cache.Store{store} } @@ -181,8 +206,15 @@ func GenerateStores[T any]( stores := make([]cache.Store, 0, len(b.namespaces)) for _, ns := range b.namespaces { store := store.NewMetricsStore(composedMetricGenFuncs, reflect.TypeOf(expectedType).String()) - listWatcher := listWatchFunc(client, ns, b.fieldSelectorFilter) - b.startReflector(expectedType, store, listWatcher, useAPIServerCache) + switch expectedType.(type) { + // Pods are handled differently because depending on the configuration + // they're collected from the API server or the Kubelet. 
+ case *corev1.Pod: + handlePodCollection(b, store, client, listWatchFunc, ns, useAPIServerCache) + default: + listWatcher := listWatchFunc(client, ns, b.fieldSelectorFilter) + b.startReflector(expectedType, store, listWatcher, useAPIServerCache) + } stores = append(stores, store) } @@ -267,3 +299,20 @@ func (c *cacheEnabledListerWatcher) List(options v1.ListOptions) (runtime.Object return res, err } + +func handlePodCollection[T any](b *Builder, store cache.Store, client T, listWatchFunc func(kubeClient T, ns string, fieldSelector string) cache.ListerWatcher, namespace string, useAPIServerCache bool) { + if b.collectPodsFromKubelet { + b.startKubeletPodWatcher(store, namespace) + return + } + + fieldSelector := b.fieldSelectorFilter + if b.collectOnlyUnassignedPods { + // spec.nodeName is set to empty for unassigned pods. This ignores + // b.fieldSelectorFilter, but I think it's not used. + fieldSelector = "spec.nodeName=" + } + + listWatcher := listWatchFunc(client, namespace, fieldSelector) + b.startReflector(&corev1.Pod{}, store, listWatcher, useAPIServerCache) +} diff --git a/pkg/kubestatemetrics/builder/kubelet_pods.go b/pkg/kubestatemetrics/builder/kubelet_pods.go new file mode 100644 index 0000000000000..ce7af8ce6683c --- /dev/null +++ b/pkg/kubestatemetrics/builder/kubelet_pods.go @@ -0,0 +1,101 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver && kubelet + +package builder + +import ( + "context" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// PodWatcher is an interface for a component that watches for changes in pods +type PodWatcher interface { + PullChanges(ctx context.Context) ([]*kubelet.Pod, error) + Expire() ([]string, error) +} + +func (b *Builder) startKubeletPodWatcher(store cache.Store, namespace string) { + podWatcher, err := kubelet.NewPodWatcher(15 * time.Second) + if err != nil { + log.Warnf("Failed to create pod watcher: %s", err) + } + + ticker := time.NewTicker(5 * time.Second) + + go func() { + for { + select { + case <-ticker.C: + err = updateStore(b.ctx, store, podWatcher, namespace) + if err != nil { + log.Errorf("Failed to update store: %s", err) + } + + case <-b.ctx.Done(): + ticker.Stop() + return + } + } + }() +} + +func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, namespace string) error { + pods, err := podWatcher.PullChanges(ctx) + if err != nil { + return fmt.Errorf("failed to pull changes from pod watcher: %w", err) + } + + for _, pod := range pods { + if namespace != corev1.NamespaceAll && pod.Metadata.Namespace != namespace { + continue + } + + kubePod := kubelet.ConvertKubeletPodToK8sPod(pod) + + err = store.Add(kubePod) + if err != nil { + log.Warnf("Failed to add pod to KSM store: %s", err) + } + } + + expiredEntities, err := podWatcher.Expire() + if err != nil { + return fmt.Errorf("failed to expire pods: %w", err) + } + + for _, expiredEntity := range expiredEntities { + // Expire() returns both pods and containers, we only care + // about pods + if !strings.HasPrefix(expiredEntity, kubelet.KubePodPrefix) { + continue + } + + // Only the UID is needed to be 
able to delete + expiredPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(strings.TrimPrefix(expiredEntity, kubelet.KubePodPrefix)), + }, + } + + err = store.Delete(&expiredPod) + if err != nil { + log.Warnf("Failed to delete pod from KSM store: %s", err) + } + } + + return nil +} diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go new file mode 100644 index 0000000000000..b4da17ab6227d --- /dev/null +++ b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubeapiserver && !kubelet + +package builder + +import ( + "k8s.io/client-go/tools/cache" +) + +func (b *Builder) startKubeletPodWatcher(_ cache.Store, _ string) { + // Do nothing +} diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_test.go b/pkg/kubestatemetrics/builder/kubelet_pods_test.go new file mode 100644 index 0000000000000..94f5f26a798ee --- /dev/null +++ b/pkg/kubestatemetrics/builder/kubelet_pods_test.go @@ -0,0 +1,165 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver && kubelet + +package builder + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" +) + +type MockPodWatcher struct { + mock.Mock +} + +func (m *MockPodWatcher) PullChanges(ctx context.Context) ([]*kubelet.Pod, error) { + args := m.Called(ctx) + return args.Get(0).([]*kubelet.Pod), args.Error(1) +} + +func (m *MockPodWatcher) Expire() ([]string, error) { + args := m.Called() + return args.Get(0).([]string), args.Error(1) +} + +type MockStore struct { + mock.Mock +} + +func (m *MockStore) Add(obj interface{}) error { + args := m.Called(obj) + return args.Error(0) +} + +func (m *MockStore) Delete(obj interface{}) error { + args := m.Called(obj) + return args.Error(0) +} + +func (m *MockStore) Update(_ interface{}) error { + // Unused in this test + return nil +} + +func (m *MockStore) List() []interface{} { + // Unused in this test + return nil +} + +func (m *MockStore) ListKeys() []string { + // Unused in this test + return nil +} + +func (m *MockStore) Get(_ interface{}) (item interface{}, exists bool, err error) { + // Unused in this test + return nil, false, nil +} + +func (m *MockStore) GetByKey(_ string) (item interface{}, exists bool, err error) { + // Unused in this test + return nil, false, nil +} + +func (m *MockStore) Replace(_ []interface{}, _ string) error { + // Unused in this test + return nil +} + +func (m *MockStore) Resync() error { + // Unused in this test + return nil +} + +func TestUpdateStore_AddPodToStore(t *testing.T) { + store := new(MockStore) + podWatcher := new(MockPodWatcher) + + kubeletPod := &kubelet.Pod{ + Metadata: kubelet.PodMetadata{ + Name: "test-pod", + Namespace: "default", + UID: "12345", + }, + } + + kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod) + 
+ podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) + podWatcher.On("Expire").Return([]string{}, nil) + store.On("Add", kubernetesPod).Return(nil) + + err := updateStore(context.TODO(), store, podWatcher, "default") + assert.NoError(t, err) + + store.AssertCalled(t, "Add", kubernetesPod) +} + +func TestUpdateStore_FilterPodsByNamespace(t *testing.T) { + store := new(MockStore) + podWatcher := new(MockPodWatcher) + + kubeletPod := &kubelet.Pod{ + Metadata: kubelet.PodMetadata{ + Name: "test-pod", + Namespace: "other-namespace", + UID: "12345", + }, + } + + store.On("Add", mock.Anything).Return(nil) + podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) + podWatcher.On("Expire").Return([]string{}, nil) + + err := updateStore(context.TODO(), store, podWatcher, "default") + assert.NoError(t, err) + + // Add() shouldn't be called because the pod is in a different namespace + store.AssertNotCalled(t, "Add", mock.Anything) +} + +func TestUpdateStore_HandleExpiredPods(t *testing.T) { + store := new(MockStore) + podWatcher := new(MockPodWatcher) + podUID := "kubernetes_pod://pod-12345" + kubernetesPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("pod-12345"), + }, + } + + podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) + podWatcher.On("Expire").Return([]string{podUID}, nil) + store.On("Delete", &kubernetesPod).Return(nil) + + err := updateStore(context.TODO(), store, podWatcher, "default") + assert.NoError(t, err) + + store.AssertCalled(t, "Delete", &kubernetesPod) +} + +func TestUpdateStore_HandleExpiredContainers(t *testing.T) { + store := new(MockStore) + podWatcher := new(MockPodWatcher) + + podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) + podWatcher.On("Expire").Return([]string{"container-12345"}, nil) + + err := updateStore(context.TODO(), store, podWatcher, "default") + assert.NoError(t, err) + + // Delete() shouldn't be called 
because the expired entity is not a pod + store.AssertNotCalled(t, "Delete", mock.Anything) +} diff --git a/pkg/languagedetection/privileged/privileged_detector.go b/pkg/languagedetection/privileged/privileged_detector.go index e14b1db593e48..cf050a77676f5 100644 --- a/pkg/languagedetection/privileged/privileged_detector.go +++ b/pkg/languagedetection/privileged/privileged_detector.go @@ -53,6 +53,7 @@ func handleDetectorError(err error) { type LanguageDetector struct { hostProc string binaryIDCache *simplelru.LRU[binaryID, languagemodels.Language] + mux *sync.RWMutex detectors []languagemodels.Detector } @@ -64,6 +65,7 @@ func NewLanguageDetector() LanguageDetector { detectors: detectorsWithPrivilege, hostProc: kernel.ProcFSRoot(), binaryIDCache: lru, + mux: &sync.RWMutex{}, } } @@ -78,13 +80,15 @@ func (l *LanguageDetector) DetectWithPrivileges(procs []languagemodels.Process) continue } - if lang, ok := l.binaryIDCache.Get(bin); ok { + l.mux.RLock() + lang, ok := l.binaryIDCache.Get(bin) + l.mux.RUnlock() + if ok { log.Tracef("Pid %v already detected as %v, skipping", proc.GetPid(), lang.Name) languages[i] = lang continue } - var lang languagemodels.Language for _, detector := range l.detectors { var err error lang, err = detector.DetectLanguage(proc) @@ -94,7 +98,9 @@ func (l *LanguageDetector) DetectWithPrivileges(procs []languagemodels.Process) } languages[i] = lang } + l.mux.Lock() l.binaryIDCache.Add(bin, lang) + l.mux.Unlock() } return languages } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go index 789573fbdd692..ead3da8f38604 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go @@ -11,6 +11,8 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/logs/message" + status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" + 
"github.com/DataDog/datadog-agent/pkg/telemetry" ) type bucket struct { @@ -59,39 +61,55 @@ func (b *bucket) flush() *message.Message { copy(content, data) msg := message.NewRawMessage(content, b.message.Status, b.originalDataLen, b.message.ParsingExtra.Timestamp) + tlmTags := []string{} if b.lineCount > 1 { msg.ParsingExtra.IsMultiLine = true + tlmTags = append(tlmTags, "line_type:multi_line") if b.tagMultiLineLogs { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.AutoMultiLineTag) } + } else { + tlmTags = append(tlmTags, "line_type:single_line") } if b.truncated { msg.ParsingExtra.IsTruncated = true + tlmTags = append(tlmTags, "truncated:true") if b.tagTruncatedLogs { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) } } + + telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.flush", 1, tlmTags) return msg } // Aggregator aggregates multiline logs with a given label. type Aggregator struct { - outputFn func(m *message.Message) - bucket *bucket - maxContentSize int - flushTimeout time.Duration - flushTimer *time.Timer + outputFn func(m *message.Message) + bucket *bucket + maxContentSize int + flushTimeout time.Duration + flushTimer *time.Timer + multiLineMatchInfo *status.CountInfo + linesCombinedInfo *status.CountInfo } // NewAggregator creates a new aggregator. 
-func NewAggregator(outputFn func(m *message.Message), maxContentSize int, flushTimeout time.Duration, tagTruncatedLogs bool, tagMultiLineLogs bool) *Aggregator { +func NewAggregator(outputFn func(m *message.Message), maxContentSize int, flushTimeout time.Duration, tagTruncatedLogs bool, tagMultiLineLogs bool, tailerInfo *status.InfoRegistry) *Aggregator { + multiLineMatchInfo := status.NewCountInfo("MultiLine matches") + linesCombinedInfo := status.NewCountInfo("Lines Combined") + tailerInfo.Register(multiLineMatchInfo) + tailerInfo.Register(linesCombinedInfo) + return &Aggregator{ - outputFn: outputFn, - bucket: &bucket{buffer: bytes.NewBuffer(nil), tagTruncatedLogs: tagTruncatedLogs, tagMultiLineLogs: tagMultiLineLogs}, - maxContentSize: maxContentSize, - flushTimeout: flushTimeout, + outputFn: outputFn, + bucket: &bucket{buffer: bytes.NewBuffer(nil), tagTruncatedLogs: tagTruncatedLogs, tagMultiLineLogs: tagMultiLineLogs}, + maxContentSize: maxContentSize, + flushTimeout: flushTimeout, + multiLineMatchInfo: multiLineMatchInfo, + linesCombinedInfo: linesCombinedInfo, } } @@ -116,6 +134,8 @@ func (a *Aggregator) Aggregate(msg *message.Message, label Label) { // If `startGroup` - flush the bucket. 
if label == startGroup { + a.multiLineMatchInfo.Add(1) + telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.multiline_matches", 1, []string{""}) a.Flush() } @@ -127,6 +147,11 @@ func (a *Aggregator) Aggregate(msg *message.Message, label Label) { a.bucket.truncate() // Truncate the start of the next bucket } + if !a.bucket.isEmpty() { + a.linesCombinedInfo.Add(1) + telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.lines_combined", 1, []string{""}) + } + a.bucket.add(msg) } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go index 603ac5ec02cd1..be067716ccb37 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/logs/message" + status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" ) func makeHandler() (chan *message.Message, func(*message.Message)) { @@ -35,7 +36,7 @@ func assertMessageContent(t *testing.T, m *message.Message, content string) { func TestNoAggregate(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) ag.Aggregate(newMessage("1"), noAggregate) ag.Aggregate(newMessage("2"), noAggregate) @@ -49,7 +50,7 @@ func TestNoAggregate(t *testing.T) { func TestNoAggregateEndsGroup(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) ag.Aggregate(newMessage("1"), startGroup) 
ag.Aggregate(newMessage("2"), startGroup) @@ -62,7 +63,7 @@ func TestNoAggregateEndsGroup(t *testing.T) { func TestAggregateGroups(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) // Aggregated log ag.Aggregate(newMessage("1"), startGroup) @@ -82,7 +83,7 @@ func TestAggregateGroups(t *testing.T) { func TestAggregateDoesntStartGroup(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) ag.Aggregate(newMessage("1"), aggregate) ag.Aggregate(newMessage("2"), aggregate) @@ -95,7 +96,7 @@ func TestAggregateDoesntStartGroup(t *testing.T) { func TestForceFlush(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) ag.Aggregate(newMessage("1"), startGroup) ag.Aggregate(newMessage("2"), aggregate) @@ -107,7 +108,7 @@ func TestForceFlush(t *testing.T) { func TestAggregationTimer(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false) + ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) assert.Nil(t, ag.FlushChan()) ag.Aggregate(newMessage("1"), startGroup) @@ -124,7 +125,7 @@ func TestAggregationTimer(t *testing.T) { func TestTagTruncatedLogs(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), true, false) + ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), true, false, status.NewInfoRegistry()) 
ag.Aggregate(newMessage("1234567890"), startGroup) ag.Aggregate(newMessage("1"), aggregate) // Causes overflow, truncate and flush @@ -148,7 +149,7 @@ func TestTagTruncatedLogs(t *testing.T) { func TestTagMultiLineLogs(t *testing.T) { outputChan, outputFn := makeHandler() - ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), false, true) + ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), false, true, status.NewInfoRegistry()) ag.Aggregate(newMessage("12345"), startGroup) ag.Aggregate(newMessage("67890"), aggregate) diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go index b598f618f1494..663db8d2d33f4 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go @@ -23,6 +23,7 @@ func NewJSONDetector() *JSONDetector { func (j *JSONDetector) ProcessAndContinue(context *messageContext) bool { if jsonRegexp.Match(context.rawMessage) { context.label = noAggregate + context.labelAssignedBy = "JSON_detector" return false } return true diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go b/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go index 097bda7410aa7..27867c2703232 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go @@ -21,9 +21,10 @@ type messageContext struct { rawMessage []byte // NOTE: tokens can be nil if the heuristic runs before the tokenizer. // Heuristic implementations must check if tokens is nil before using it. - tokens []tokens.Token - tokenIndicies []int - label Label + tokens []tokens.Token + tokenIndicies []int + label Label + labelAssignedBy string } // Heuristic is an interface representing a strategy to label log messages. 
@@ -37,28 +38,38 @@ type Heuristic interface { // Each Heuristic operates on the output of the previous heuristic - mutating the message context. // A label is chosen when a herusitc signals the labeler to stop or when all herustics have been processed. type Labeler struct { - heuristics []Heuristic + lablerHeuristics []Heuristic + analyticsHeuristics []Heuristic } // NewLabeler creates a new labeler with the given heuristics. -func NewLabeler(heuristics []Heuristic) *Labeler { +// lablerHeuristics are used to mutate the label of a log message. +// analyticsHeuristics are used to analyze the log message and labeling process +// for the status page and telemetry. +func NewLabeler(lablerHeuristics []Heuristic, analyticsHeuristics []Heuristic) *Labeler { return &Labeler{ - heuristics: heuristics, + lablerHeuristics: lablerHeuristics, + analyticsHeuristics: analyticsHeuristics, } } // Label labels a log message. func (l *Labeler) Label(rawMessage []byte) Label { context := &messageContext{ - rawMessage: rawMessage, - tokens: nil, - label: aggregate, + rawMessage: rawMessage, + tokens: nil, + label: aggregate, + labelAssignedBy: "default", } - for _, h := range l.heuristics { + for _, h := range l.lablerHeuristics { if !h.ProcessAndContinue(context) { - return context.label + break } } + // analyticsHeuristics are always run and don't change the final label + for _, h := range l.analyticsHeuristics { + h.ProcessAndContinue(context) + } return context.label } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/labeler_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/labeler_test.go index 754b795486287..925d2e62caa2e 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/labeler_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/labeler_test.go @@ -35,7 +35,7 @@ func TestLabelerProceedNextHeuristic(t *testing.T) { return true }, }, - }) + }, []Heuristic{}) assert.Equal(t, noAggregate, labeler.Label([]byte("test 123"))) } @@ 
-55,7 +55,7 @@ func TestLabelerProceedFirstHeuristicWins(t *testing.T) { return true }, }, - }) + }, []Heuristic{}) assert.Equal(t, startGroup, labeler.Label([]byte("test 123"))) } @@ -68,7 +68,7 @@ func TestLabelerDefaultLabel(t *testing.T) { return false }, }, - }) + }, []Heuristic{}) assert.Equal(t, aggregate, labeler.Label([]byte("test 123"))) } @@ -82,7 +82,7 @@ func TestLabelerPassesAlongMessageContext(t *testing.T) { return false }, }, - }) + }, []Heuristic{}) labeler.Label([]byte("test 123")) } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go index 5675829ad07d9..483821034a4f4 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go @@ -7,23 +7,29 @@ package automultilinedetection import ( + "fmt" + "sync" + "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection/tokens" + status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" "github.com/DataDog/datadog-agent/pkg/util/log" ) type row struct { - tokens []tokens.Token - label Label - count int64 - lastIndex int64 + tokens []tokens.Token + label Label + labelAssignedBy string + count int64 + lastIndex int64 } // DiagnosticRow is a struct that represents a diagnostic view of a row in the PatternTable. type DiagnosticRow struct { - TokenString string - LabelString string - Count int64 - LastIndex int64 + TokenString string + LabelString string + labelAssignedBy string + Count int64 + LastIndex int64 } // PatternTable is a table of patterns that occur over time from a log source. @@ -34,26 +40,38 @@ type PatternTable struct { index int64 maxTableSize int matchThreshold float64 + + // Pattern table can be queried by the agent status command. + // We must lock access to the table when it is being queried or updated. 
+ lock sync.Mutex } // NewPatternTable returns a new PatternTable heuristic. -func NewPatternTable(maxTableSize int, matchThreshold float64) *PatternTable { - return &PatternTable{ +func NewPatternTable(maxTableSize int, matchThreshold float64, tailerInfo *status.InfoRegistry) *PatternTable { + pt := &PatternTable{ table: make([]*row, 0, maxTableSize), index: 0, maxTableSize: maxTableSize, matchThreshold: matchThreshold, + lock: sync.Mutex{}, } + + tailerInfo.Register(pt) + return pt } // insert adds a pattern to the table and returns the index -func (p *PatternTable) insert(tokens []tokens.Token, label Label) int { +func (p *PatternTable) insert(context *messageContext) int { + p.lock.Lock() + defer p.lock.Unlock() + p.index++ foundIdx := -1 for i, r := range p.table { - if isMatch(r.tokens, tokens, p.matchThreshold) { + if isMatch(r.tokens, context.tokens, p.matchThreshold) { r.count++ - r.label = label + r.label = context.label + r.labelAssignedBy = context.labelAssignedBy r.lastIndex = p.index foundIdx = i break @@ -74,10 +92,11 @@ func (p *PatternTable) insert(tokens []tokens.Token, label Label) int { } p.table = append(p.table, &row{ - tokens: tokens, - label: label, - count: 1, - lastIndex: p.index, + tokens: context.tokens, + label: context.label, + labelAssignedBy: context.labelAssignedBy, + count: 1, + lastIndex: p.index, }) return len(p.table) - 1 @@ -107,13 +126,17 @@ func (p *PatternTable) evictLRU() { // DumpTable returns a slice of DiagnosticRow structs that represent the current state of the table. 
func (p *PatternTable) DumpTable() []DiagnosticRow { + p.lock.Lock() + defer p.lock.Unlock() + debug := make([]DiagnosticRow, 0, len(p.table)) for _, r := range p.table { debug = append(debug, DiagnosticRow{ - TokenString: tokensToString(r.tokens), - LabelString: labelToString(r.label), - Count: r.count, - LastIndex: r.lastIndex}) + TokenString: tokensToString(r.tokens), + LabelString: labelToString(r.label), + labelAssignedBy: r.labelAssignedBy, + Count: r.count, + LastIndex: r.lastIndex}) } return debug } @@ -128,6 +151,23 @@ func (p *PatternTable) ProcessAndContinue(context *messageContext) bool { return true } - p.insert(context.tokens, context.label) + p.insert(context) return true } + +// Implements the InfoProvider interface +// This data is exposed on the status page + +// InfoKey returns a string representing the key for the pattern table. +func (p *PatternTable) InfoKey() string { + return "Auto multiline pattern stats" +} + +// Info returns a breakdown of the patterns in the table. 
+func (p *PatternTable) Info() []string { + data := []string{} + for _, r := range p.DumpTable() { + data = append(data, fmt.Sprintf("%-11d %-15s %-20s %s", r.Count, r.LabelString, r.labelAssignedBy, r.TokenString)) + } + return data +} diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table_test.go index b1115761bc909..cf6919f428628 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table_test.go @@ -11,35 +11,41 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection/tokens" + status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" ) -func justTokens(tokens []tokens.Token, _ []int) []tokens.Token { - return tokens +func makeContext(str string, label Label) *messageContext { + tokenizer := NewTokenizer(0) + ts, _ := tokenizer.tokenize([]byte(str)) + + return &messageContext{ + rawMessage: []byte(str), + tokens: ts, + label: label, + } } func TestPatternTable(t *testing.T) { - tokenizer := NewTokenizer(0) - pt := NewPatternTable(5, 1) + pt := NewPatternTable(5, 1, status.NewInfoRegistry()) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 !"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 @"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 $"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 %"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 ^"))), aggregate) + pt.insert(makeContext("abc 123 !", aggregate)) + pt.insert(makeContext("abc 123 @", aggregate)) + pt.insert(makeContext("abc 123 $", aggregate)) + pt.insert(makeContext("abc 123 %", aggregate)) + pt.insert(makeContext("abc 123 ^", aggregate)) assert.Equal(t, 5, len(pt.table)) // Add more of the same pattern - should remain at 
the top and get it's count updated - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 !"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 !"))), aggregate) + pt.insert(makeContext("abc 123 !", aggregate)) + pt.insert(makeContext("abc 123 !", aggregate)) assert.Equal(t, 5, len(pt.table)) assert.Equal(t, int64(3), pt.table[0].count) // At this point `abc 123 @` was the last updated, so it will be evicted first - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 *"))), aggregate) + pt.insert(makeContext("abc 123 *", aggregate)) assert.Equal(t, 5, len(pt.table), "Table should not grow past limit") dump := pt.DumpTable() @@ -52,7 +58,7 @@ func TestPatternTable(t *testing.T) { assert.Equal(t, "CCC DDD *", dump[4].TokenString) // Should sift up to position #2 - pt.insert(justTokens(tokenizer.tokenize([]byte("abc 123 *"))), aggregate) + pt.insert(makeContext("abc 123 *", aggregate)) dump = pt.DumpTable() @@ -70,11 +76,11 @@ func TestPatternTable(t *testing.T) { assert.Equal(t, int64(1), dump[4].Count) // Lets pretend the whole log format totally changes for some reason, and evict the whole table. - pt.insert(justTokens(tokenizer.tokenize([]byte("! acb 123"))), startGroup) - pt.insert(justTokens(tokenizer.tokenize([]byte("@ acb 123"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("# acb 123"))), noAggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("$ acb 123"))), aggregate) - pt.insert(justTokens(tokenizer.tokenize([]byte("% acb 123"))), startGroup) + pt.insert(makeContext("! 
acb 123", startGroup)) + pt.insert(makeContext("@ acb 123", aggregate)) + pt.insert(makeContext("# acb 123", noAggregate)) + pt.insert(makeContext("$ acb 123", aggregate)) + pt.insert(makeContext("% acb 123", startGroup)) dump = pt.DumpTable() diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector.go b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector.go index 6cae17615f777..3245c31d7ad3b 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector.go @@ -36,7 +36,7 @@ var knownTimestampFormats = []string{ "Mar 16 08:12:04", "Jul 1 09:00:55", "2024-10-14T22:11:20+0000", - "2024-07-01T14:59:55.711'+0000'", + "2024-07-01T14:59:55.711", "2024-07-01T14:59:55.711Z", "2024-08-19 12:17:55-0400", "2024-06-26 02:31:29,573", @@ -117,6 +117,7 @@ func (t *TimestampDetector) ProcessAndContinue(context *messageContext) bool { if t.tokenGraph.MatchProbability(context.tokens).probability > t.matchThreshold { context.label = startGroup + context.labelAssignedBy = "timestamp_detector" } return true diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go index 3d10b2705f46b..db8e9832c1961 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go @@ -46,6 +46,8 @@ var inputs = []testInput{ {startGroup, "127.0.0.1 - - [16/May/2024:19:49:17 +0000]"}, {startGroup, "127.0.0.1 - - [17/May/2024:13:51:52 +0000] \"GET /probe?debug=1 HTTP/1.1\" 200 0 "}, {startGroup, "nova-api.log.1.2017-05-16_13:53:08 2017-05-16 00:00:00.008 25746 INFO nova.osapi"}, + {startGroup, "foo bar log 2024-11-22'T'10:10:15.455 my log line"}, + {startGroup, "foo bar log 2024-07-01T14:59:55.711'+0000' my log line"}, // A case where the timestamp 
has a non-matching token in the midddle of it. {startGroup, "acb def 10:10:10 foo 2024-05-15 hijk lmop"}, @@ -69,6 +71,14 @@ var inputs = []testInput{ {aggregate, " auth.handler: auth handler stopped"}, {aggregate, "10:10:10 foo :10: bar 10:10"}, {aggregate, "1234-1234-1234-123-21-1"}, + {aggregate, " = '10.20.30.123' (DEBUG)"}, + {aggregate, "192.168.1.123"}, + {aggregate, "'192.168.1.123'"}, + {aggregate, "10.0.0.123"}, + {aggregate, "\"10.0.0.123\""}, + {aggregate, "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, + {aggregate, "fd12:3456:789a:1::1"}, + {aggregate, "2001:db8:0:1234::5678"}, } func TestCorrectLabelIsAssigned(t *testing.T) { diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go index 49dbf2ca8bbb7..d60a6031ea3db 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go @@ -102,6 +102,7 @@ func (j *UserSamples) ProcessAndContinue(context *messageContext) bool { for _, sample := range j.samples { if isMatch(sample.tokens, context.tokens, sample.matchThreshold) { context.label = sample.label + context.labelAssignedBy = "user_sample" return false } } diff --git a/pkg/logs/internal/decoder/auto_multiline_handler.go b/pkg/logs/internal/decoder/auto_multiline_handler.go index fa694f97dcf17..bf69b41693415 100644 --- a/pkg/logs/internal/decoder/auto_multiline_handler.go +++ b/pkg/logs/internal/decoder/auto_multiline_handler.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" automultilinedetection "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection" "github.com/DataDog/datadog-agent/pkg/logs/message" + status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" ) // AutoMultilineHandler aggreagates multiline logs. 
@@ -20,28 +21,39 @@ type AutoMultilineHandler struct { } // NewAutoMultilineHandler creates a new auto multiline handler. -func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize int, flushTimeout time.Duration) *AutoMultilineHandler { +func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize int, flushTimeout time.Duration, tailerInfo *status.InfoRegistry) *AutoMultilineHandler { // Order is important - heuristics := []automultilinedetection.Heuristic{ - automultilinedetection.NewJSONDetector(), - automultilinedetection.NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes")), - automultilinedetection.NewUserSamples(config.Datadog()), - automultilinedetection.NewTimestampDetector(config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold")), - automultilinedetection.NewPatternTable( - config.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), - config.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), - ), + heuristics := []automultilinedetection.Heuristic{} + + heuristics = append(heuristics, automultilinedetection.NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) + + if config.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { + heuristics = append(heuristics, automultilinedetection.NewJSONDetector()) + } + + heuristics = append(heuristics, automultilinedetection.NewUserSamples(config.Datadog())) + + if config.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { + heuristics = append(heuristics, automultilinedetection.NewTimestampDetector( + config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) + } + + analyticsHeuristics := []automultilinedetection.Heuristic{automultilinedetection.NewPatternTable( + 
config.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), + config.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), + tailerInfo), } return &AutoMultilineHandler{ - labeler: automultilinedetection.NewLabeler(heuristics), + labeler: automultilinedetection.NewLabeler(heuristics, analyticsHeuristics), aggregator: automultilinedetection.NewAggregator( outputFn, maxContentSize, flushTimeout, config.Datadog().GetBool("logs_config.tag_truncated_logs"), - config.Datadog().GetBool("logs_config.tag_auto_multi_line_logs")), + config.Datadog().GetBool("logs_config.tag_auto_multi_line_logs"), + tailerInfo), } } diff --git a/pkg/logs/internal/decoder/decoder.go b/pkg/logs/internal/decoder/decoder.go index d3fceec44caa5..468cab5f81bee 100644 --- a/pkg/logs/internal/decoder/decoder.go +++ b/pkg/logs/internal/decoder/decoder.go @@ -98,8 +98,9 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par } } if lineHandler == nil { - if pkgConfig.Datadog().GetBool("logs_config.experimental_auto_multi_line_detection") { - lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgConfig.Datadog())) + if source.Config().ExperimentalAutoMultiLineEnabled(pkgConfig.Datadog()) { + log.Infof("Experimental Auto multi line log detection enabled") + lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgConfig.Datadog()), tailerInfo) } else if source.Config().AutoMultiLineEnabled(pkgConfig.Datadog()) { log.Infof("Auto multi line log detection enabled") diff --git a/pkg/logs/internal/tag/provider.go b/pkg/logs/internal/tag/provider.go index 934bfeee5d717..b26b34bad479f 100644 --- a/pkg/logs/internal/tag/provider.go +++ b/pkg/logs/internal/tag/provider.go @@ -11,7 +11,6 @@ import ( "github.com/benbjohnson/clock" - "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" 
"github.com/DataDog/datadog-agent/comp/logs/agent/config" pkgConfig "github.com/DataDog/datadog-agent/pkg/config" @@ -23,27 +22,34 @@ type Provider interface { GetTags() []string } +// EntityTagAdder returns the associated tag for an entity and their cardinality +type EntityTagAdder interface { + Tag(entity string, cardinality types.TagCardinality) ([]string, error) +} + // provider provides a list of up-to-date tags for a given entity by calling the tagger. type provider struct { entityID string taggerWarmupDuration time.Duration localTagProvider Provider clock clock.Clock + tagAdder EntityTagAdder sync.Once } // NewProvider returns a new Provider. -func NewProvider(entityID string) Provider { - return newProviderWithClock(entityID, clock.New()) +func NewProvider(entityID string, tagAdder EntityTagAdder) Provider { + return newProviderWithClock(entityID, clock.New(), tagAdder) } // newProviderWithClock returns a new provider using the given clock. -func newProviderWithClock(entityID string, clock clock.Clock) Provider { +func newProviderWithClock(entityID string, clock clock.Clock, tagAdder EntityTagAdder) Provider { p := &provider{ entityID: entityID, taggerWarmupDuration: config.TaggerWarmupDuration(pkgConfig.Datadog()), localTagProvider: newLocalProviderWithClock([]string{}, clock), clock: clock, + tagAdder: tagAdder, } return p @@ -58,7 +64,7 @@ func (p *provider) GetTags() []string { p.clock.Sleep(p.taggerWarmupDuration) }) - tags, err := tagger.Tag(p.entityID, types.HighCardinality) + tags, err := p.tagAdder.Tag(p.entityID, types.HighCardinality) if err != nil { log.Warnf("Cannot tag container %s: %v", p.entityID, err) return []string{} diff --git a/pkg/logs/internal/tag/provider_benchmark_test.go b/pkg/logs/internal/tag/provider_benchmark_test.go index cdaf8071e7ee9..2ebc319fd9785 100644 --- a/pkg/logs/internal/tag/provider_benchmark_test.go +++ b/pkg/logs/internal/tag/provider_benchmark_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + 
"github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" model "github.com/DataDog/datadog-agent/pkg/config/model" @@ -25,6 +26,12 @@ func setupConfig(t testing.TB, tags []string) (model.Config, time.Time) { return mockConfig, startTime } +type dummyTagAdder struct{} + +func (dummyTagAdder) Tag(string, types.TagCardinality) ([]string, error) { + return nil, nil +} + func BenchmarkProviderExpectedTags(b *testing.B) { b.ReportAllocs() @@ -39,7 +46,7 @@ func BenchmarkProviderExpectedTags(b *testing.B) { m.SetWithoutSource("logs_config.expected_tags_duration", "1m") defer m.SetWithoutSource("logs_config.expected_tags_duration", 0) - p := NewProvider("foo") + p := NewProvider("foo", dummyTagAdder{}) for i := 0; i < b.N; i++ { p.GetTags() @@ -62,7 +69,7 @@ func BenchmarkProviderExpectedTagsEmptySlice(b *testing.B) { m.SetWithoutSource("logs_config.expected_tags_duration", "1m") defer m.SetWithoutSource("logs_config.expected_tags_duration", 0) - p := NewProvider("foo") + p := NewProvider("foo", dummyTagAdder{}) for i := 0; i < b.N; i++ { p.GetTags() @@ -85,7 +92,7 @@ func BenchmarkProviderExpectedTagsNil(b *testing.B) { m.SetWithoutSource("logs_config.expected_tags_duration", "1m") defer m.SetWithoutSource("logs_config.expected_tags_duration", 0) - p := NewProvider("foo") + p := NewProvider("foo", dummyTagAdder{}) for i := 0; i < b.N; i++ { p.GetTags() @@ -105,7 +112,7 @@ func BenchmarkProviderNoExpectedTags(b *testing.B) { // Setting a test-friendly value for the deadline (test should not take 1m) m.SetWithoutSource("logs_config.expected_tags_duration", "0") - p := NewProvider("foo") + p := NewProvider("foo", dummyTagAdder{}) for i := 0; i < b.N; i++ { p.GetTags() @@ -125,7 +132,7 @@ func BenchmarkProviderNoExpectedTagsNil(b *testing.B) { // Setting a test-friendly value for the deadline (test should not take 1m) 
m.SetWithoutSource("logs_config.expected_tags_duration", "0") - p := NewProvider("foo") + p := NewProvider("foo", dummyTagAdder{}) for i := 0; i < b.N; i++ { p.GetTags() diff --git a/pkg/logs/internal/tag/provider_test.go b/pkg/logs/internal/tag/provider_test.go index b41619fea579d..13786ec561b6d 100644 --- a/pkg/logs/internal/tag/provider_test.go +++ b/pkg/logs/internal/tag/provider_test.go @@ -10,10 +10,11 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/benbjohnson/clock" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" + coreConfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -40,7 +41,7 @@ func TestProviderExpectedTags(t *testing.T) { m.SetWithoutSource("logs_config.expected_tags_duration", "5s") defer m.SetWithoutSource("logs_config.expected_tags_duration", 0) - p := newProviderWithClock("foo", clock) + p := newProviderWithClock("foo", clock, fakeTagger) pp := p.(*provider) var tt []string diff --git a/pkg/logs/internal/util/service_name.go b/pkg/logs/internal/util/service_name.go index b027c3feb9000..e6c506f4c99dc 100644 --- a/pkg/logs/internal/util/service_name.go +++ b/pkg/logs/internal/util/service_name.go @@ -8,16 +8,12 @@ package util import ( "strings" - "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/pkg/util/log" ) -// taggerFunc purpose is to ease testing ServiceNameFromTags -var taggerFunc = tagger.StandardTags - // ServiceNameFromTags returns the standard tag 'service' corresponding to a container // It returns an empty string if tag not found -func ServiceNameFromTags(ctrName, taggerEntity string) string { +func ServiceNameFromTags(ctrName, taggerEntity string, taggerFunc func(entity string) ([]string, error)) string { standardTags, err := taggerFunc(taggerEntity) if err != nil { log.Debugf("Couldn't get standard tags for container 
'%s': %v", ctrName, err) diff --git a/pkg/logs/internal/util/service_name_test.go b/pkg/logs/internal/util/service_name_test.go index 44de8126366c1..32af9e7af105d 100644 --- a/pkg/logs/internal/util/service_name_test.go +++ b/pkg/logs/internal/util/service_name_test.go @@ -48,8 +48,7 @@ func TestServiceNameFromTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - taggerFunc = tt.tFunc - if got := ServiceNameFromTags(tt.ctrName, tt.taggerEntity); got != tt.want { + if got := ServiceNameFromTags(tt.ctrName, tt.taggerEntity, tt.tFunc); got != tt.want { t.Errorf("ServiceNameFromTags() = %v, want %v", got, tt.want) } }) diff --git a/pkg/logs/launchers/container/launcher.go b/pkg/logs/launchers/container/launcher.go index e3ad958983288..8f143f25ddc63 100644 --- a/pkg/logs/launchers/container/launcher.go +++ b/pkg/logs/launchers/container/launcher.go @@ -11,6 +11,7 @@ package container import ( "context" + "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/launchers" @@ -60,14 +61,17 @@ type Launcher struct { tailers map[*sourcesPkg.LogSource]tailerfactory.Tailer wmeta optional.Option[workloadmeta.Component] + + tagger tagger.Component } // NewLauncher returns a new launcher -func NewLauncher(sources *sourcesPkg.LogSources, wmeta optional.Option[workloadmeta.Component]) *Launcher { +func NewLauncher(sources *sourcesPkg.LogSources, wmeta optional.Option[workloadmeta.Component], tagger tagger.Component) *Launcher { launcher := &Launcher{ sources: sources, tailers: make(map[*sourcesPkg.LogSource]tailerfactory.Tailer), wmeta: wmeta, + tagger: tagger, } return launcher } @@ -81,7 +85,7 @@ func (l *Launcher) Start(sourceProvider launchers.SourceProvider, pipelineProvid l.cancel = cancel l.stopped = make(chan struct{}) - l.tailerFactory = tailerfactory.New(l.sources, 
pipelineProvider, registry, l.wmeta) + l.tailerFactory = tailerfactory.New(l.sources, pipelineProvider, registry, l.wmeta, l.tagger) go l.run(ctx, sourceProvider) } diff --git a/pkg/logs/launchers/container/launcher_nodocker.go b/pkg/logs/launchers/container/launcher_nodocker.go index 3832af18605b1..7240c621d19e0 100644 --- a/pkg/logs/launchers/container/launcher_nodocker.go +++ b/pkg/logs/launchers/container/launcher_nodocker.go @@ -9,6 +9,7 @@ package container import ( + "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/launchers" @@ -24,7 +25,7 @@ import ( type Launcher struct{} // NewLauncher returns a new launcher -func NewLauncher(_ *sourcesPkg.LogSources, _ optional.Option[workloadmeta.Component]) *Launcher { +func NewLauncher(_ *sourcesPkg.LogSources, _ optional.Option[workloadmeta.Component], _ tagger.Component) *Launcher { return &Launcher{} } diff --git a/pkg/logs/launchers/container/launcher_test.go b/pkg/logs/launchers/container/launcher_test.go index 04a6c0340fa67..fb31bdc69bd25 100644 --- a/pkg/logs/launchers/container/launcher_test.go +++ b/pkg/logs/launchers/container/launcher_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/logs/auditor" @@ -36,7 +37,9 @@ func (tf *testFactory) MakeTailer(source *sources.LogSource) (tailerfactory.Tail } func TestStartStop(t *testing.T) { - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component]()) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) sp := 
launchers.NewMockSourceProvider() pl := pipeline.NewMockProvider() @@ -54,7 +57,9 @@ func TestStartStop(t *testing.T) { } func TestAddsRemovesSource(t *testing.T) { - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component]()) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(source *sources.LogSource) (tailerfactory.Tailer, error) { return &tailerfactory.TestTailer{Name: source.Name}, nil @@ -83,7 +88,9 @@ func TestAddsRemovesSource(t *testing.T) { } func TestCannotMakeTailer(t *testing.T) { - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component]()) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(_ *sources.LogSource) (tailerfactory.Tailer, error) { return nil, errors.New("uhoh") @@ -104,7 +111,9 @@ func TestCannotMakeTailer(t *testing.T) { } func TestCannotStartTailer(t *testing.T) { - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component]()) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() + l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(source *sources.LogSource) (tailerfactory.Tailer, error) { return &tailerfactory.TestTailer{Name: source.Name, StartError: true}, nil diff --git a/pkg/logs/launchers/container/tailerfactory/defaults.go b/pkg/logs/launchers/container/tailerfactory/defaults.go index e41fd5fc778e6..b1efaca063171 100644 --- a/pkg/logs/launchers/container/tailerfactory/defaults.go +++ b/pkg/logs/launchers/container/tailerfactory/defaults.go @@ -10,11 +10,11 @@ package tailerfactory import ( "errors" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" "github.com/DataDog/datadog-agent/pkg/logs/sources" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -33,7 +33,9 @@ func (tf *factory) defaultSourceAndService(source *sources.LogSource, logWhat co getServiceNameFromTags := func(containerID, containerName string) string { return util.ServiceNameFromTags( containerName, - containers.BuildTaggerEntityName(containerID)) + types.NewEntityID(types.ContainerID, containerID).String(), + tf.tagger.Standard, + ) } return defaultSourceAndServiceInner(source, logWhat, diff --git a/pkg/logs/launchers/container/tailerfactory/factory.go b/pkg/logs/launchers/container/tailerfactory/factory.go index a3b2108461458..07bce46bf10be 100644 --- a/pkg/logs/launchers/container/tailerfactory/factory.go +++ b/pkg/logs/launchers/container/tailerfactory/factory.go @@ -10,6 +10,7 @@ package tailerfactory import ( + "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" @@ -50,18 +51,21 @@ type factory struct { // dockerutil memoizes a DockerUtil instance; fetch this with getDockerUtil(). dockerutil *dockerutilPkg.DockerUtil + + tagger tagger.Component } var _ Factory = (*factory)(nil) // New creates a new Factory. 
-func New(sources *sources.LogSources, pipelineProvider pipeline.Provider, registry auditor.Registry, workloadmetaStore optional.Option[workloadmeta.Component]) Factory { +func New(sources *sources.LogSources, pipelineProvider pipeline.Provider, registry auditor.Registry, workloadmetaStore optional.Option[workloadmeta.Component], tagger tagger.Component) Factory { return &factory{ sources: sources, pipelineProvider: pipelineProvider, registry: registry, workloadmetaStore: workloadmetaStore, cop: containersorpods.NewChooser(), + tagger: tagger, } } diff --git a/pkg/logs/launchers/container/tailerfactory/socket.go b/pkg/logs/launchers/container/tailerfactory/socket.go index 4cbf781a1e42a..714be2815d44c 100644 --- a/pkg/logs/launchers/container/tailerfactory/socket.go +++ b/pkg/logs/launchers/container/tailerfactory/socket.go @@ -55,5 +55,6 @@ func (tf *factory) makeSocketTailer(source *sources.LogSource) (Tailer, error) { pipeline, readTimeout, tf.registry, + tf.tagger, ), nil } diff --git a/pkg/logs/launchers/container/tailerfactory/tailers/socket.go b/pkg/logs/launchers/container/tailerfactory/tailers/socket.go index cb151f62bc8e5..88e8a313db7f2 100644 --- a/pkg/logs/launchers/container/tailerfactory/tailers/socket.go +++ b/pkg/logs/launchers/container/tailerfactory/tailers/socket.go @@ -19,6 +19,7 @@ import ( dockerutilPkg "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/logs/agent/config" ) @@ -42,6 +43,7 @@ type DockerSocketTailer struct { source *sources.LogSource pipeline chan *message.Message readTimeout time.Duration + tagger tagger.Component // registry is used to calculate `since` registry auditor.Registry @@ -57,7 +59,7 @@ type DockerSocketTailer struct { } // NewDockerSocketTailer Creates a new docker socket tailer -func NewDockerSocketTailer(dockerutil *dockerutilPkg.DockerUtil, containerID string, source 
*sources.LogSource, pipeline chan *message.Message, readTimeout time.Duration, registry auditor.Registry) *DockerSocketTailer { +func NewDockerSocketTailer(dockerutil *dockerutilPkg.DockerUtil, containerID string, source *sources.LogSource, pipeline chan *message.Message, readTimeout time.Duration, registry auditor.Registry, tagger tagger.Component) *DockerSocketTailer { return &DockerSocketTailer{ dockerutil: dockerutil, ContainerID: containerID, @@ -65,6 +67,7 @@ func NewDockerSocketTailer(dockerutil *dockerutilPkg.DockerUtil, containerID str pipeline: pipeline, readTimeout: readTimeout, registry: registry, + tagger: tagger, ctx: nil, cancel: nil, stopped: nil, @@ -81,7 +84,9 @@ func (t *DockerSocketTailer) tryStartTailer() (*dockerTailerPkg.Tailer, chan str t.source, t.pipeline, erroredContainerID, - t.readTimeout) + t.readTimeout, + t.tagger, + ) since, err := since(t.registry, inner.Identifier()) if err != nil { log.Warnf("Could not recover tailing from last committed offset %v: %v", diff --git a/pkg/logs/launchers/file/launcher.go b/pkg/logs/launchers/file/launcher.go index 3eef1ab04f637..e0c44ca20e6e1 100644 --- a/pkg/logs/launchers/file/launcher.go +++ b/pkg/logs/launchers/file/launcher.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/logs/agent/config" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" "github.com/DataDog/datadog-agent/pkg/logs/auditor" @@ -52,10 +53,11 @@ type Launcher struct { validatePodContainerID bool scanPeriod time.Duration flarecontroller *flareController.FlareController + tagger tagger.Component } // NewLauncher returns a new launcher. 
-func NewLauncher(tailingLimit int, tailerSleepDuration time.Duration, validatePodContainerID bool, scanPeriod time.Duration, wildcardMode string, flarecontroller *flareController.FlareController) *Launcher { +func NewLauncher(tailingLimit int, tailerSleepDuration time.Duration, validatePodContainerID bool, scanPeriod time.Duration, wildcardMode string, flarecontroller *flareController.FlareController, tagger tagger.Component) *Launcher { var wildcardStrategy fileprovider.WildcardSelectionStrategy switch wildcardMode { @@ -79,6 +81,7 @@ func NewLauncher(tailingLimit int, tailerSleepDuration time.Duration, validatePo validatePodContainerID: validatePodContainerID, scanPeriod: scanPeriod, flarecontroller: flarecontroller, + tagger: tagger, } } @@ -386,6 +389,7 @@ func (s *Launcher) createTailer(file *tailer.File, outputChan chan *message.Mess SleepDuration: s.tailerSleepDuration, Decoder: decoder.NewDecoderFromSource(file.Source, tailerInfo), Info: tailerInfo, + TagAdder: s.tagger, } return tailer.NewTailer(tailerOptions) @@ -393,7 +397,7 @@ func (s *Launcher) createTailer(file *tailer.File, outputChan chan *message.Mess func (s *Launcher) createRotatedTailer(t *tailer.Tailer, file *tailer.File, pattern *regexp.Regexp) *tailer.Tailer { tailerInfo := t.GetInfo() - return t.NewRotatedTailer(file, decoder.NewDecoderFromSourceWithPattern(file.Source, pattern, tailerInfo), tailerInfo) + return t.NewRotatedTailer(file, decoder.NewDecoderFromSourceWithPattern(file.Source, pattern, tailerInfo), tailerInfo, s.tagger) } //nolint:revive // TODO(AML) Fix revive linter diff --git a/pkg/logs/launchers/file/launcher_test.go b/pkg/logs/launchers/file/launcher_test.go index ed436171346e2..96c44e8031568 100644 --- a/pkg/logs/launchers/file/launcher_test.go +++ b/pkg/logs/launchers/file/launcher_test.go @@ -16,6 +16,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + "github.com/DataDog/datadog-agent/comp/core/tagger" + 
"github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" pkgConfig "github.com/DataDog/datadog-agent/pkg/config" @@ -49,11 +51,13 @@ type LauncherTestSuite struct { source *sources.LogSource openFilesLimit int s *Launcher + tagger tagger.Mock } func (suite *LauncherTestSuite) SetupTest() { suite.pipelineProvider = mock.NewMockProvider() suite.outputChan = suite.pipelineProvider.NextPipelineChan() + suite.tagger = taggerimpl.SetupFakeTagger(suite.T()) var err error suite.testDir = suite.T().TempDir() @@ -72,7 +76,7 @@ func (suite *LauncherTestSuite) SetupTest() { suite.source = sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Identifier: suite.configID, Path: suite.testPath}) sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - suite.s = NewLauncher(suite.openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + suite.s = NewLauncher(suite.openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, suite.tagger) suite.s.pipelineProvider = suite.pipelineProvider suite.s.registry = auditor.NewRegistry() suite.s.activeSources = append(suite.s.activeSources, suite.source) @@ -85,6 +89,7 @@ func (suite *LauncherTestSuite) TearDownTest() { suite.testFile.Close() suite.testRotatedFile.Close() suite.s.cleanup() + suite.tagger.ResetTagger() } func (suite *LauncherTestSuite) TestLauncherStartsTailers() { @@ -219,6 +224,8 @@ func TestLauncherTestSuiteWithConfigID(t *testing.T) { func TestLauncherScanStartNewTailer(t *testing.T) { var path string var msg *message.Message + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() IDs := []string{"", "123456789"} @@ -230,7 +237,7 @@ func TestLauncherScanStartNewTailer(t *testing.T) { openFilesLimit := 2 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := 
NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() outputChan := launcher.pipelineProvider.NextPipelineChan() @@ -264,12 +271,14 @@ func TestLauncherScanStartNewTailer(t *testing.T) { func TestLauncherWithConcurrentContainerTailer(t *testing.T) { testDir := t.TempDir() path := fmt.Sprintf("%s/container.log", testDir) + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() // create launcher openFilesLimit := 3 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() outputChan := launcher.pipelineProvider.NextPipelineChan() @@ -312,12 +321,14 @@ func TestLauncherWithConcurrentContainerTailer(t *testing.T) { func TestLauncherTailFromTheBeginning(t *testing.T) { testDir := t.TempDir() + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() // create launcher openFilesLimit := 3 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() outputChan := launcher.pipelineProvider.NextPipelineChan() @@ -362,6 +373,8 @@ func TestLauncherTailFromTheBeginning(t *testing.T) { func TestLauncherSetTail(t *testing.T) { testDir := t.TempDir() + fakeTagger := 
taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path1 := fmt.Sprintf("%s/test.log", testDir) path2 := fmt.Sprintf("%s/test2.log", testDir) @@ -370,7 +383,7 @@ func TestLauncherSetTail(t *testing.T) { openFilesLimit := 2 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() @@ -388,13 +401,15 @@ func TestLauncherSetTail(t *testing.T) { func TestLauncherConfigIdentifier(t *testing.T) { testDir := t.TempDir() + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path := fmt.Sprintf("%s/test.log", testDir) os.Create(path) openFilesLimit := 2 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() @@ -412,6 +427,8 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) { var path string testDir := t.TempDir() + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() // creates files path = fmt.Sprintf("%s/1.log", testDir) @@ -431,7 +448,7 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) { openFilesLimit := 2 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = 
auditor.NewRegistry() source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: path}) @@ -453,15 +470,17 @@ } func TestLauncherUpdatesSourceForExistingTailer(t *testing.T) { testDir := t.TempDir() + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path := fmt.Sprintf("%s/*.log", testDir) os.Create(path) openFilesLimit := 2 sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() @@ -504,6 +522,8 @@ func TestLauncherScanRecentFilesWithRemoval(t *testing.T) { err = os.Remove(path(name)) assert.Nil(t, err) } + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() createLauncher := func() *Launcher { sleepDuration := 20 * time.Millisecond @@ -516,6 +536,7 @@ func TestLauncherScanRecentFilesWithRemoval(t *testing.T) { validatePodContainerID: false, scanPeriod: 10 * time.Second, flarecontroller: flareController.NewFlareController(), + tagger: fakeTagger, } launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() @@ -557,6 +578,8 @@ func TestLauncherScanRecentFilesWithNewFiles(t *testing.T) { testDir := t.TempDir() baseTime := time.Date(2010, time.August, 10, 25, 0, 0, 0, time.UTC) openFilesLimit := 2 + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path := func(name string) string { return fmt.Sprintf("%s/%s", testDir, name) @@ -572,7 +595,7 @@ func TestLauncherScanRecentFilesWithNewFiles(t *testing.T) { createLauncher := func() *Launcher { sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, 
sleepDuration, false, 10*time.Second, "by_modification_time", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_modification_time", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() logDirectory := fmt.Sprintf("%s/*.log", testDir) @@ -621,6 +644,8 @@ func TestLauncherFileRotation(t *testing.T) { testDir := t.TempDir() openFilesLimit := 2 + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path := func(name string) string { return fmt.Sprintf("%s/%s", testDir, name) @@ -633,7 +658,7 @@ func TestLauncherFileRotation(t *testing.T) { createLauncher := func() *Launcher { sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() logDirectory := fmt.Sprintf("%s/*.log", testDir) @@ -686,6 +711,8 @@ func TestLauncherFileDetectionSingleScan(t *testing.T) { testDir := t.TempDir() openFilesLimit := 2 + fakeTagger := taggerimpl.SetupFakeTagger(t) + defer fakeTagger.ResetTagger() path := func(name string) string { return fmt.Sprintf("%s/%s", testDir, name) @@ -698,7 +725,7 @@ func TestLauncherFileDetectionSingleScan(t *testing.T) { createLauncher := func() *Launcher { sleepDuration := 20 * time.Millisecond fc := flareController.NewFlareController() - launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc) + launcher := NewLauncher(openFilesLimit, sleepDuration, false, 10*time.Second, "by_name", fc, fakeTagger) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditor.NewRegistry() logDirectory := fmt.Sprintf("%s/*.log", testDir) diff --git a/pkg/logs/tailers/docker/tailer.go 
b/pkg/logs/tailers/docker/tailer.go index 082821e41fc82..be25cdc72b521 100644 --- a/pkg/logs/tailers/docker/tailer.go +++ b/pkg/logs/tailers/docker/tailer.go @@ -15,6 +15,8 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" "github.com/DataDog/datadog-agent/pkg/logs/internal/framer" @@ -23,7 +25,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" - "github.com/DataDog/datadog-agent/pkg/util/containers" dockerutil "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -79,13 +80,13 @@ type Tailer struct { } // NewTailer returns a new Tailer -func NewTailer(cli *dockerutil.DockerUtil, containerID string, source *sources.LogSource, outputChan chan *message.Message, erroredContainerID chan string, readTimeout time.Duration) *Tailer { +func NewTailer(cli *dockerutil.DockerUtil, containerID string, source *sources.LogSource, outputChan chan *message.Message, erroredContainerID chan string, readTimeout time.Duration, tagger tagger.Component) *Tailer { return &Tailer{ ContainerID: containerID, outputChan: outputChan, decoder: decoder.NewDecoderWithFraming(sources.NewReplaceableSource(source), dockerstream.New(containerID), framer.DockerStream, nil, status.NewInfoRegistry()), Source: source, - tagProvider: tag.NewProvider(containers.BuildTaggerEntityName(containerID)), + tagProvider: tag.NewProvider(types.NewEntityID(types.ContainerID, containerID).String(), tagger), dockerutil: cli, readTimeout: readTimeout, sleepDuration: defaultSleepDuration, diff --git a/pkg/logs/tailers/file/tailer.go b/pkg/logs/tailers/file/tailer.go index b2dacd33ed8ab..9f76f70df132e 100644 --- 
a/pkg/logs/tailers/file/tailer.go +++ b/pkg/logs/tailers/file/tailer.go @@ -19,8 +19,8 @@ import ( "github.com/benbjohnson/clock" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" coreConfig "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" @@ -128,6 +128,7 @@ type TailerOptions struct { Decoder *decoder.Decoder // Required Info *status.InfoRegistry // Required Rotated bool // Optional + TagAdder tag.EntityTagAdder // Required } // NewTailer returns an initialized Tailer, read to be started. @@ -142,7 +143,7 @@ type TailerOptions struct { func NewTailer(opts *TailerOptions) *Tailer { var tagProvider tag.Provider if opts.File.Source.Config().Identifier != "" { - tagProvider = tag.NewProvider(containers.BuildTaggerEntityName(opts.File.Source.Config().Identifier)) + tagProvider = tag.NewProvider(types.NewEntityID(types.ContainerID, opts.File.Source.Config().Identifier).String(), opts.TagAdder) } else { tagProvider = tag.NewLocalProvider([]string{}) } @@ -198,7 +199,7 @@ func addToTailerInfo(k, m string, tailerInfo *status.InfoRegistry) { // NewRotatedTailer creates a new tailer that replaces this one, writing // messages to the same channel but using an updated file and decoder. 
-func (t *Tailer) NewRotatedTailer(file *File, decoder *decoder.Decoder, info *status.InfoRegistry) *Tailer { +func (t *Tailer) NewRotatedTailer(file *File, decoder *decoder.Decoder, info *status.InfoRegistry, tagAdder tag.EntityTagAdder) *Tailer { options := &TailerOptions{ OutputChan: t.outputChan, File: file, @@ -206,6 +207,7 @@ func (t *Tailer) NewRotatedTailer(file *File, decoder *decoder.Decoder, info *st Decoder: decoder, Info: info, Rotated: true, + TagAdder: tagAdder, } return NewTailer(options) diff --git a/pkg/logs/tailers/journald/docker.go b/pkg/logs/tailers/journald/docker.go index d3a8a9bb7bc3b..0c80df85cf435 100644 --- a/pkg/logs/tailers/journald/docker.go +++ b/pkg/logs/tailers/journald/docker.go @@ -13,7 +13,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -35,7 +34,7 @@ func (t *Tailer) getContainerID(entry *sdjournal.JournalEntry) string { // getContainerTags returns all the tags of a given container. func (t *Tailer) getContainerTags(containerID string) []string { - tags, err := tagger.Tag(containers.BuildTaggerEntityName(containerID), types.HighCardinality) + tags, err := tagger.Tag(types.NewEntityID(types.ContainerID, containerID).String(), types.HighCardinality) if err != nil { log.Warn(err) } diff --git a/pkg/network/buffer.go b/pkg/network/buffer.go index f86a091a0a5d7..1f0ef66176117 100644 --- a/pkg/network/buffer.go +++ b/pkg/network/buffer.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(NET) Fix revive linter +// Package network contains logic and types for network tracing package network // ConnectionBuffer encapsulates a resizing buffer for ConnectionStat objects diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index a5f4c8997c7e3..06dc994e0da48 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter +// Package config implements network tracing configuration package config import ( diff --git a/pkg/network/config/replace_rules.go b/pkg/network/config/replace_rules.go index 483139606b74c..093ba772a0f35 100644 --- a/pkg/network/config/replace_rules.go +++ b/pkg/network/config/replace_rules.go @@ -10,8 +10,6 @@ import ( "fmt" "regexp" - "github.com/DataDog/datadog-agent/pkg/config" - //nolint:revive // TODO(NET) Fix revive linter ddconfig "github.com/DataDog/datadog-agent/pkg/config" ) @@ -28,7 +26,7 @@ type ReplaceRule struct { } func parseReplaceRules(cfg ddconfig.Config, key string) ([]*ReplaceRule, error) { - if !config.SystemProbe().IsSet(key) { + if !ddconfig.SystemProbe().IsSet(key) { return nil, nil } diff --git a/pkg/network/config/sysctl/sysctl.go b/pkg/network/config/sysctl/sysctl.go index 98603605bbe82..d89c0e10d4aed 100644 --- a/pkg/network/config/sysctl/sysctl.go +++ b/pkg/network/config/sysctl/sysctl.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(NET) Fix revive linter +// Package sysctl implements reading of system parameters such as system limits package sysctl import ( diff --git a/pkg/network/config/sysctl/sysctl_darwin.go b/pkg/network/config/sysctl/sysctl_darwin.go index f1696da1f9674..0bb07c812997b 100644 --- a/pkg/network/config/sysctl/sysctl_darwin.go +++ b/pkg/network/config/sysctl/sysctl_darwin.go @@ -5,7 +5,7 @@ //go:build darwin -//nolint:revive // 
TODO(NET) Fix revive linter +// Package sysctl implements reading of system parameters such as system limits package sysctl import ( diff --git a/pkg/network/dns/types.go b/pkg/network/dns/types.go index e14b36d676484..281ac2b597005 100644 --- a/pkg/network/dns/types.go +++ b/pkg/network/dns/types.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter +// Package dns traces DNS activity and caches DNS lookups for reverse DNS capability package dns import ( diff --git a/pkg/network/ebpf/bpf_module.go b/pkg/network/ebpf/bpf_module.go index a0fd795f2b251..9a49a8ac9d008 100644 --- a/pkg/network/ebpf/bpf_module.go +++ b/pkg/network/ebpf/bpf_module.go @@ -21,7 +21,6 @@ import ( var prebuiltModulesInUse = map[string]struct{}{} var telemetrymu sync.Mutex -//nolint:revive // TODO(NET) Fix revive linter func ModuleFileName(moduleName string, debug bool) string { if debug { return fmt.Sprintf("%s-debug.o", moduleName) @@ -67,7 +66,6 @@ func ReadOffsetBPFModule(bpfDir string, debug bool) (bytecode.AssetReader, error return readModule(bpfDir, "offset-guess", debug) } -//nolint:revive // TODO(NET) Fix revive linter func ReadFentryTracerModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { return readModule(bpfDir, "tracer-fentry", debug) } @@ -77,7 +75,6 @@ func ReadConntrackBPFModule(bpfDir string, debug bool) (bytecode.AssetReader, er return readModule(bpfDir, "conntrack", debug) } -//nolint:revive // TODO(NET) Fix revive linter func GetModulesInUse() []string { telemetrymu.Lock() defer telemetrymu.Unlock() diff --git a/pkg/network/ebpf/conntrack.go b/pkg/network/ebpf/conntrack.go index c97f7cae18117..1a5798ae0d771 100644 --- a/pkg/network/ebpf/conntrack.go +++ b/pkg/network/ebpf/conntrack.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(NET) Fix revive linter +// Package ebpf implements tracing network events with 
eBPF package ebpf import ( diff --git a/pkg/network/encoding/marshal/dns.go b/pkg/network/encoding/marshal/dns.go index 4fd644db711ce..0cab4bae202fd 100644 --- a/pkg/network/encoding/marshal/dns.go +++ b/pkg/network/encoding/marshal/dns.go @@ -3,7 +3,6 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter package marshal import ( diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go index 3c5c4f9769741..6908b02965048 100644 --- a/pkg/network/event_common.go +++ b/pkg/network/event_common.go @@ -226,7 +226,9 @@ func (s StatCounters) IsZero() bool { return s == StatCounters{} } -//nolint:revive // TODO(NET) Fix revive linter +// StatCookie is a 64-bit hash designed to uniquely identify a connection. +// In eBPF this is 32 bits but it gets re-hashed to 64 bits in userspace to +// reduce collisions; see PR #17197 for more info. type StatCookie = uint64 // ConnectionStats stores statistics for a single connection. 
Field order in the struct should be 8-byte aligned diff --git a/pkg/network/events/monitor.go b/pkg/network/events/monitor.go index 9a5cbe17c2476..e06ddee483ea8 100644 --- a/pkg/network/events/monitor.go +++ b/pkg/network/events/monitor.go @@ -70,7 +70,7 @@ func Initialized() bool { return theMonitor.Load() != nil } -//nolint:revive // TODO(NET) Fix revive linter +// ProcessEventHandler represents a handler function triggered for process events type ProcessEventHandler interface { HandleProcessEvent(*Process) } diff --git a/pkg/network/events/network_consumer.go b/pkg/network/events/network_consumer.go index 95129db2c27ed..e71cbc6e6758e 100644 --- a/pkg/network/events/network_consumer.go +++ b/pkg/network/events/network_consumer.go @@ -15,12 +15,12 @@ import ( // NetworkConsumer describes a process monitoring object type NetworkConsumer struct{} -//nolint:revive // TODO(NET) Fix revive linter +// Start starts the event consumer (noop) func (n *NetworkConsumer) Start() error { return nil } -//nolint:revive // TODO(NET) Fix revive linter +// Stop stops the event consumer (noop) func (n *NetworkConsumer) Stop() { } diff --git a/pkg/network/events/network_consumer_others.go b/pkg/network/events/network_consumer_others.go index c1ece0fdbaa82..32b2e80c93c71 100644 --- a/pkg/network/events/network_consumer_others.go +++ b/pkg/network/events/network_consumer_others.go @@ -17,12 +17,12 @@ import ( // NetworkConsumer describes a process monitoring object type NetworkConsumer struct{} -//nolint:revive // TODO(NET) Fix revive linter +// Start starts the event consumer (noop) func (n *NetworkConsumer) Start() error { return fmt.Errorf("network consumer is only supported on linux") } -//nolint:revive // TODO(NET) Fix revive linter +// Stop stops the event consumer (noop) func (n *NetworkConsumer) Stop() {} // ID returns id for process monitor diff --git a/pkg/network/netlink/circuit_breaker.go b/pkg/network/netlink/circuit_breaker.go index b4d1b6cdd0c36..8cd62122c25d7 100644 --- 
a/pkg/network/netlink/circuit_breaker.go +++ b/pkg/network/netlink/circuit_breaker.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter +// Package netlink implements network connection tracking. package netlink import ( diff --git a/pkg/network/netlink/conntracker_test.go b/pkg/network/netlink/conntracker_test.go index c0dffccab8573..c5b8d28d60a31 100644 --- a/pkg/network/netlink/conntracker_test.go +++ b/pkg/network/netlink/conntracker_test.go @@ -196,9 +196,7 @@ func TestTooManyEntries(t *testing.T) { // Run this test with -memprofile to get an insight of how much memory is // allocated/used by Conntracker to store maxStateSize entries. // Example: go test -run TestConntrackerMemoryAllocation -memprofile mem.prof . -// -//nolint:revive // TODO(NET) Fix revive linter -func TestConntrackerMemoryAllocation(t *testing.T) { +func TestConntrackerMemoryAllocation(_t *testing.T) { rt := newConntracker(10000) ipGen := randomIPGen() diff --git a/pkg/network/netlink/consumer_test.go b/pkg/network/netlink/consumer_test.go index e2e7fb05e6151..a8e2243436bc4 100644 --- a/pkg/network/netlink/consumer_test.go +++ b/pkg/network/netlink/consumer_test.go @@ -47,7 +47,6 @@ func TestConsumerKeepsRunningAfterCircuitBreakerTrip(t *testing.T) { go func() { defer close(exited) - //nolint:revive // TODO(NET) Fix revive linter for range ev { } }() diff --git a/pkg/network/netlink/noop.go b/pkg/network/netlink/noop.go index 840043ff950c5..67ce4912168a4 100644 --- a/pkg/network/netlink/noop.go +++ b/pkg/network/netlink/noop.go @@ -25,35 +25,26 @@ func NewNoOpConntracker() Conntracker { // GetType returns a string describing whether the conntracker is "ebpf" or "netlink" func (*noOpConntracker) GetType() string { return "" } -//nolint:revive // TODO(NET) Fix revive linter -func (*noOpConntracker) GetTranslationForConn(c *network.ConnectionStats) 
*network.IPTranslation { +func (*noOpConntracker) GetTranslationForConn(_c *network.ConnectionStats) *network.IPTranslation { return nil } -//nolint:revive // TODO(NET) Fix revive linter -func (*noOpConntracker) DeleteTranslation(c *network.ConnectionStats) { +func (*noOpConntracker) DeleteTranslation(_c *network.ConnectionStats) { } -//nolint:revive // TODO(NET) Fix revive linter func (*noOpConntracker) IsSampling() bool { return false } -//nolint:revive // TODO(NET) Fix revive linter func (*noOpConntracker) Close() {} -//nolint:revive // TODO(NET) Fix revive linter -func (c *noOpConntracker) DumpCachedTable(ctx context.Context) (map[uint32][]DebugConntrackEntry, error) { +func (*noOpConntracker) DumpCachedTable(_ctx context.Context) (map[uint32][]DebugConntrackEntry, error) { return nil, nil } // Describe returns all descriptions of the collector -// -//nolint:revive // TODO(NET) Fix revive linter -func (*noOpConntracker) Describe(ch chan<- *prometheus.Desc) {} +func (*noOpConntracker) Describe(_ch chan<- *prometheus.Desc) {} // Collect returns the current state of all metrics of the collector -// -//nolint:revive // TODO(NET) Fix revive linter -func (*noOpConntracker) Collect(ch chan<- prometheus.Metric) {} +func (*noOpConntracker) Collect(_ch chan<- prometheus.Metric) {} diff --git a/pkg/network/netlink/socket.go b/pkg/network/netlink/socket.go index 0a18bb571e77f..d05ce1c98aea1 100644 --- a/pkg/network/netlink/socket.go +++ b/pkg/network/netlink/socket.go @@ -111,23 +111,21 @@ func NewSocket(netNS netns.NsHandle) (*Socket, error) { } // fixMsg updates the fields of m using the logic specified in Send. 
-func (c *Socket) fixMsg(m *netlink.Message, ml int) { +func (s *Socket) fixMsg(m *netlink.Message, ml int) { if m.Header.Length == 0 { m.Header.Length = uint32(nlmsgAlign(ml)) } if m.Header.Sequence == 0 { - m.Header.Sequence = c.seq.Add(1) + m.Header.Sequence = s.seq.Add(1) } if m.Header.PID == 0 { - m.Header.PID = c.pid + m.Header.PID = s.pid } } // Send a netlink.Message -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) Send(m netlink.Message) error { s.fixMsg(&m, nlmsgLength(len(m.Data))) b, err := m.MarshalBinary() @@ -151,16 +149,12 @@ func (s *Socket) Send(m netlink.Message) error { } // Receive is not implemented. See ReceiveInto -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) Receive() ([]netlink.Message, error) { return nil, errNotImplemented } // ReceiveAndDiscard reads netlink messages off the socket & discards them. // If the NLMSG_DONE flag is found in one of the messages, returns true. -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) ReceiveAndDiscard() (bool, uint32, error) { for { n, _, err := s.recvmsg() @@ -205,8 +199,6 @@ func (s *Socket) ReceiveAndDiscard() (bool, uint32, error) { } // ReceiveInto reads one or more netlink.Messages off the socket -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) ReceiveInto(b []byte) ([]netlink.Message, uint32, error) { var netns uint32 n, oobn, err := s.recvmsg() @@ -282,29 +274,21 @@ func parseNetNS(scms []unix.SocketControlMessage) uint32 { } // File descriptor of the socket -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) File() *os.File { return s.fd } // Close the socket -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) Close() error { return s.fd.Close() } // SendMessages isn't implemented in our case -// -//nolint:revive // TODO(NET) Fix revive linter -func (s *Socket) SendMessages(m []netlink.Message) error { +func (s *Socket) SendMessages(_m []netlink.Message) error { return 
errNotImplemented } // JoinGroup creates a new group membership -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) JoinGroup(group uint32) error { return os.NewSyscallError("setsockopt", s.SetSockoptInt( unix.SOL_NETLINK, @@ -314,8 +298,6 @@ func (s *Socket) JoinGroup(group uint32) error { } // LeaveGroup deletes a group membership -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) LeaveGroup(group uint32) error { return os.NewSyscallError("setsockopt", s.SetSockoptInt( unix.SOL_NETLINK, @@ -325,8 +307,6 @@ func (s *Socket) LeaveGroup(group uint32) error { } // SetSockoptInt sets a socket option -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) SetSockoptInt(level, opt, value int) error { // Value must be in range of a C integer. if value < math.MinInt32 || value > math.MaxInt32 { @@ -346,8 +326,6 @@ func (s *Socket) SetSockoptInt(level, opt, value int) error { } // GetSockoptInt gets a socket option -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) GetSockoptInt(level, opt int) (int, error) { var err error var v int @@ -363,8 +341,6 @@ func (s *Socket) GetSockoptInt(level, opt int) (int, error) { } // SetBPF attaches an assembled BPF program to the socket -// -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) SetBPF(filter []bpf.RawInstruction) error { prog := unix.SockFprog{ Len: uint16(len(filter)), @@ -382,7 +358,6 @@ func (s *Socket) SetBPF(filter []bpf.RawInstruction) error { return err } -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) recvmsg() (int, int, error) { ctrlErr := s.conn.Read(s.rawread) if ctrlErr != nil { @@ -391,7 +366,6 @@ func (s *Socket) recvmsg() (int, int, error) { return s.n, s.oobn, s.readErr } -//nolint:revive // TODO(NET) Fix revive linter func (s *Socket) rawread(fd uintptr) bool { s.n, s.oobn, _, s.readErr = noallocRecvmsg(int(fd), s.recvbuf, s.oobbuf, unix.MSG_DONTWAIT) return ready(s.readErr) diff --git 
a/pkg/network/netlink/testutil/conntrack.go b/pkg/network/netlink/testutil/conntrack.go index 1a6714c18f768..016a2e1805b26 100644 --- a/pkg/network/netlink/testutil/conntrack.go +++ b/pkg/network/netlink/testutil/conntrack.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(NET) Fix revive linter +// Package testutil contains helper functions used for testing conntrack package testutil import ( @@ -98,7 +98,7 @@ func SetupDNAT6(t *testing.T) { teardownDNAT6(t, ifName, linkName) }) - nettestutil.Ip6tablesSave(t) + nettestutil.IP6tablesSave(t) cmds := []string{ fmt.Sprintf("ip link add %s type dummy", linkName), fmt.Sprintf("ip address add fd00::1 dev %s", linkName), @@ -177,8 +177,7 @@ func SetupVeth6Pair(t *testing.T) (ns string) { return } -//nolint:revive // TODO(NET) Fix revive linter -func teardownVeth6Pair(t *testing.T, ns string) { +func teardownVeth6Pair(t *testing.T, _ns string) { cmds := []string{ "ip link del veth1", } @@ -236,7 +235,7 @@ func SetupCrossNsDNAT6(t *testing.T) (ns string) { }) ns = SetupVeth6Pair(t) - nettestutil.Ip6tablesSave(t) + nettestutil.IP6tablesSave(t) cmds := []string{ "ip6tables -I INPUT 1 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT", fmt.Sprintf("ip netns exec %s ip6tables -A PREROUTING -t nat -p tcp --dport 80 -j REDIRECT --to-port 8080", ns), diff --git a/pkg/network/nettop/main.go b/pkg/network/nettop/main.go index 5cf90f8387e6e..ad28dda34b871 100644 --- a/pkg/network/nettop/main.go +++ b/pkg/network/nettop/main.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(NET) Fix revive linter +// Package main - single file executable package main import ( diff --git a/pkg/network/protocols/http/tls_counter.go b/pkg/network/protocols/http/tls_counter.go index bd9461bc73f3d..b6fcccf69bec6 100644 --- a/pkg/network/protocols/http/tls_counter.go +++ b/pkg/network/protocols/http/tls_counter.go @@ -16,7 +16,7 @@ import ( type TLSCounter struct { counterPlain *libtelemetry.Counter counterGnuTLS *libtelemetry.Counter - counterOpenSLL *libtelemetry.Counter + counterOpenSSL *libtelemetry.Counter counterJavaTLS *libtelemetry.Counter counterGoTLS *libtelemetry.Counter counterIstioTLS *libtelemetry.Counter @@ -29,7 +29,7 @@ func NewTLSCounter(metricGroup *libtelemetry.MetricGroup, metricName string, tag // tls_library:none is a must, as prometheus metrics must have the same cardinality of tags counterPlain: metricGroup.NewCounter(metricName, append(tags, "encrypted:false", "tls_library:none")...), counterGnuTLS: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:gnutls")...), - counterOpenSLL: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:openssl")...), + counterOpenSSL: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:openssl")...), counterJavaTLS: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:java")...), counterGoTLS: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:go")...), counterIstioTLS: metricGroup.NewCounter(metricName, append(tags, "encrypted:true", "tls_library:istio")...), diff --git a/pkg/network/protocols/http/tls_counter_linux.go b/pkg/network/protocols/http/tls_counter_linux.go index 46745f641b8b4..d217d692b4750 100644 --- a/pkg/network/protocols/http/tls_counter_linux.go +++ b/pkg/network/protocols/http/tls_counter_linux.go @@ -13,7 +13,7 @@ func (t *TLSCounter) Add(tx Transaction) { case GnuTLS: t.counterGnuTLS.Add(1) case OpenSSL: - 
t.counterOpenSLL.Add(1) + t.counterOpenSSL.Add(1) case Java: t.counterJavaTLS.Add(1) case Go: diff --git a/pkg/network/tags_linux.go b/pkg/network/tags_linux.go index 45c351c8a1f85..53f350ea8e96e 100644 --- a/pkg/network/tags_linux.go +++ b/pkg/network/tags_linux.go @@ -12,17 +12,17 @@ import ( ) const ( - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagGnuTLS is the tag for GnuTLS connections ConnTagGnuTLS = http.GnuTLS - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagOpenSSL is the tag for OpenSSL connections ConnTagOpenSSL = http.OpenSSL - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagGo is the tag for Go TLS connections ConnTagGo = http.Go - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagJava is the tag for Java TLS connections ConnTagJava = http.Java - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagTLS is the tag for TLS connections in general ConnTagTLS = http.TLS - //nolint:revive // TODO(NET) Fix revive linter + // ConnTagIstio is the tag for Istio TLS connections ConnTagIstio = http.Istio // ConnTagNodeJS is the tag for NodeJS TLS connections ConnTagNodeJS = http.NodeJS diff --git a/pkg/network/testutil/cmd.go b/pkg/network/testutil/cmd.go index f6154ecbd342e..f7411eb606aa4 100644 --- a/pkg/network/testutil/cmd.go +++ b/pkg/network/testutil/cmd.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter +// Package testutil contains helper functions used in network tests package testutil import ( @@ -59,7 +59,8 @@ func RunCommandWithContext(ctx context.Context, cmd string) (string, error) { return string(out), nil } -//nolint:revive // TODO(NET) Fix revive linter +// StartCommandCtx runs a command with the given context, returning a handle to the command +// and its input stream. 
func StartCommandCtx(ctx context.Context, cmd string) (*exec.Cmd, io.WriteCloser, error) { args := strings.Split(cmd, " ") c := exec.CommandContext(ctx, args[0], args[1:]...) diff --git a/pkg/network/testutil/iptables.go b/pkg/network/testutil/iptables.go index 4ee65c2d3e5e2..f48a890641fe1 100644 --- a/pkg/network/testutil/iptables.go +++ b/pkg/network/testutil/iptables.go @@ -59,11 +59,9 @@ func IptablesRestore(tb testing.TB, state []byte) { assert.NoError(tb, restoreErr) } -// Ip6tablesSave saves the current iptables state to a file +// IP6tablesSave saves the current iptables state to a file // and returns its path -// -//nolint:revive // TODO(NET) Fix revive linter -func Ip6tablesSave(tb testing.TB) { +func IP6tablesSave(tb testing.TB) { cmd := exec.Command("ip6tables-save") state, err := cmd.Output() require.NoError(tb, err) @@ -79,14 +77,12 @@ func Ip6tablesSave(tb testing.TB) { require.NoError(tb, err) fullState := append(state, natState...) tb.Cleanup(func() { - Ip6tablesRestore(tb, fullState) + IP6tablesRestore(tb, fullState) }) } -// Ip6tablesRestore restores iptables state from a file -// -//nolint:revive // TODO(NET) Fix revive linter -func Ip6tablesRestore(tb testing.TB, state []byte) { +// IP6tablesRestore restores iptables state from a file +func IP6tablesRestore(tb testing.TB, state []byte) { cmd := exec.Command("ip6tables-restore", "--counters") cmd.Stdin = bytes.NewReader(state) assert.NoError(tb, cmd.Run()) diff --git a/pkg/network/tracer/connection/dump.go b/pkg/network/tracer/connection/dump.go index 5629b3c86791a..101ab0d084cb6 100644 --- a/pkg/network/tracer/connection/dump.go +++ b/pkg/network/tracer/connection/dump.go @@ -5,7 +5,6 @@ //go:build linux_bpf -//nolint:revive // TODO(NET) Fix revive linter package connection import ( @@ -23,7 +22,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // TODO(NET) Fix revive linter func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, currentMap 
*ebpf.Map) { switch mapName { diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go index 0d6ad30bf5b81..e46d8dd242161 100644 --- a/pkg/network/tracer/connection/ebpf_tracer.go +++ b/pkg/network/tracer/connection/ebpf_tracer.go @@ -46,21 +46,14 @@ const ( connTracerModuleName = "network_tracer__ebpf" ) -//nolint:revive // TODO(NET) Fix revive linter var EbpfTracerTelemetry = struct { connections telemetry.Gauge tcpFailedConnects *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter TcpSentMiscounts *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter unbatchedTcpClose *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter unbatchedUdpClose *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter UdpSendsProcessed *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter UdpSendsMissed *prometheus.Desc - //nolint:revive // TODO(NET) Fix revive linter UdpDroppedConns *prometheus.Desc // doubleFlushAttemptsClose is a counter measuring the number of attempts to flush a closed connection twice from tcp_close doubleFlushAttemptsClose *prometheus.Desc @@ -74,19 +67,12 @@ var EbpfTracerTelemetry = struct { iterationDups telemetry.Counter iterationAborts telemetry.Counter - //nolint:revive // TODO(NET) Fix revive linter lastTcpFailedConnects *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter LastTcpSentMiscounts *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter lastUnbatchedTcpClose *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter lastUnbatchedUdpClose *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter lastUdpSendsProcessed *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter lastUdpSendsMissed *atomic.Int64 - //nolint:revive // TODO(NET) Fix revive linter lastUdpDroppedConns *atomic.Int64 // lastDoubleFlushAttemptsClose is a counter measuring the diff between the last two values of doubleFlushAttemptsClose 
lastDoubleFlushAttemptsClose *atomic.Int64 @@ -204,7 +190,6 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace } var m *manager.Manager - //nolint:revive // TODO(NET) Fix revive linter var tracerType TracerType = TracerTypeFentry var closeTracerFn func() m, closeTracerFn, err := fentry.LoadTracer(config, mgrOptions, connCloseEventHandler) diff --git a/pkg/network/tracer/connection/fentry/manager.go b/pkg/network/tracer/connection/fentry/manager.go index 79022c28371b2..5bf409712b057 100644 --- a/pkg/network/tracer/connection/fentry/manager.go +++ b/pkg/network/tracer/connection/fentry/manager.go @@ -5,7 +5,6 @@ //go:build linux_bpf -//nolint:revive // TODO(NET) Fix revive linter package fentry import ( diff --git a/pkg/network/tracer/connection/fentry/probes.go b/pkg/network/tracer/connection/fentry/probes.go index 4fa6ecc383384..76109f5ab8ffb 100644 --- a/pkg/network/tracer/connection/fentry/probes.go +++ b/pkg/network/tracer/connection/fentry/probes.go @@ -46,9 +46,7 @@ const ( udpSendSkb = "kprobe__udp_send_skb" skbFreeDatagramLocked = "skb_free_datagram_locked" - //nolint:revive // TODO(NET) Fix revive linter __skbFreeDatagramLocked = "__skb_free_datagram_locked" - //nolint:revive // TODO(NET) Fix revive linter skbConsumeUdp = "skb_consume_udp" udpv6RecvMsg = "udpv6_recvmsg" diff --git a/pkg/network/tracer/connection/fentry/tracer.go b/pkg/network/tracer/connection/fentry/tracer.go index 1bf3276e18fce..2a8eb802de103 100644 --- a/pkg/network/tracer/connection/fentry/tracer.go +++ b/pkg/network/tracer/connection/fentry/tracer.go @@ -26,7 +26,6 @@ import ( const probeUID = "net" -//nolint:revive // TODO(NET) Fix revive linter var ErrorNotSupported = errors.New("fentry tracer is only supported on Fargate") // LoadTracer loads a new tracer diff --git a/pkg/network/tracer/connection/kprobe/compile.go b/pkg/network/tracer/connection/kprobe/compile.go index 2733c73eed09e..ec9ed0e338bbc 100644 --- 
a/pkg/network/tracer/connection/kprobe/compile.go +++ b/pkg/network/tracer/connection/kprobe/compile.go @@ -5,7 +5,6 @@ //go:build linux_bpf -//nolint:revive // TODO(NET) Fix revive linter package kprobe import ( diff --git a/pkg/network/tracer/connection/kprobe/tracer.go b/pkg/network/tracer/connection/kprobe/tracer.go index 4f2cf37173633..9fa6888a53d98 100644 --- a/pkg/network/tracer/connection/kprobe/tracer.go +++ b/pkg/network/tracer/connection/kprobe/tracer.go @@ -29,15 +29,11 @@ import ( const probeUID = "net" -//nolint:revive // TODO(NET) Fix revive linter type TracerType int const ( - //nolint:revive // TODO(NET) Fix revive linter TracerTypePrebuilt TracerType = iota - //nolint:revive // TODO(NET) Fix revive linter TracerTypeRuntimeCompiled - //nolint:revive // TODO(NET) Fix revive linter TracerTypeCORE ) diff --git a/pkg/network/tracer/connection/tracer.go b/pkg/network/tracer/connection/tracer.go index 3ab6dff49ca49..c1d318d9108e2 100644 --- a/pkg/network/tracer/connection/tracer.go +++ b/pkg/network/tracer/connection/tracer.go @@ -23,15 +23,10 @@ import ( type TracerType int const ( - //nolint:revive // TODO(NET) Fix revive linter TracerTypeKProbePrebuilt TracerType = iota - //nolint:revive // TODO(NET) Fix revive linter TracerTypeKProbeRuntimeCompiled - //nolint:revive // TODO(NET) Fix revive linter TracerTypeKProbeCORE - //nolint:revive // TODO(NET) Fix revive linter TracerTypeFentry - //nolint:revive // TODO(NET) Fix revive linter TracerTypeEbpfless ) diff --git a/pkg/network/tracer/offsetguess/conntrack.go b/pkg/network/tracer/offsetguess/conntrack.go index b38b3b2defd3d..0d6432a86fbfd 100644 --- a/pkg/network/tracer/offsetguess/conntrack.go +++ b/pkg/network/tracer/offsetguess/conntrack.go @@ -5,7 +5,6 @@ //go:build linux_bpf -//nolint:revive // TODO(NET) Fix revive linter package offsetguess import ( @@ -45,7 +44,6 @@ type conntrackOffsetGuesser struct { udpv6Enabled uint64 } -//nolint:revive // TODO(NET) Fix revive linter func 
NewConntrackOffsetGuesser(cfg *config.Config) (OffsetGuesser, error) { tcpv6Enabled, udpv6Enabled := getIpv6Configuration(cfg) tcpv6EnabledConst, udpv6EnabledConst := boolToUint64(tcpv6Enabled), boolToUint64(udpv6Enabled) @@ -80,7 +78,6 @@ func (c *conntrackOffsetGuesser) Close() { } } -//nolint:revive // TODO(NET) Fix revive linter func (c *conntrackOffsetGuesser) Probes(*config.Config) (map[probes.ProbeFuncName]struct{}, error) { p := map[probes.ProbeFuncName]struct{}{} enableProbe(p, probes.ConntrackHashInsert) diff --git a/pkg/network/tracer/offsetguess/offsetguess.go b/pkg/network/tracer/offsetguess/offsetguess.go index 066f1b8573108..2da365eb2c234 100644 --- a/pkg/network/tracer/offsetguess/offsetguess.go +++ b/pkg/network/tracer/offsetguess/offsetguess.go @@ -83,7 +83,6 @@ var whatString = map[GuessWhat]string{ GuessCtNet: "conntrack network namespace", } -//nolint:revive // TODO(NET) Fix revive linter type OffsetGuesser interface { Manager() *manager.Manager Probes(c *config.Config) (map[string]struct{}, error) @@ -193,7 +192,6 @@ func setupOffsetGuesser(guesser OffsetGuesser, config *config.Config, buf byteco return nil } -//nolint:revive // TODO(NET) Fix revive linter func RunOffsetGuessing(cfg *config.Config, buf bytecode.AssetReader, newGuesser func() (OffsetGuesser, error)) (editors []manager.ConstantEditor, err error) { // Offset guessing has been flaky for some customers, so if it fails we'll retry it up to 5 times start := time.Now() diff --git a/pkg/network/tracer/offsetguess/tracer.go b/pkg/network/tracer/offsetguess/tracer.go index e2eac8acacc14..b01a9a56e4906 100644 --- a/pkg/network/tracer/offsetguess/tracer.go +++ b/pkg/network/tracer/offsetguess/tracer.go @@ -77,7 +77,6 @@ type tracerOffsetGuesser struct { guessUDPv6 bool } -//nolint:revive // TODO(NET) Fix revive linter func NewTracerOffsetGuesser() (OffsetGuesser, error) { return &tracerOffsetGuesser{ m: &manager.Manager{ @@ -287,7 +286,6 @@ func uint32ArrayFromIPv6(ip net.IP) (addr 
[4]uint32, err error) { // IPv6LinkLocalPrefix is only exposed for testing purposes var IPv6LinkLocalPrefix = "fe80::" -//nolint:revive // TODO(NET) Fix revive linter func GetIPv6LinkLocalAddress() ([]*net.UDPAddr, error) { ints, err := net.Interfaces() if err != nil { @@ -677,10 +675,9 @@ func (t *tracerOffsetGuesser) checkAndUpdateCurrentOffset(mp *maps.GenericMap[ui if !t.guessTCPv6 && !t.guessUDPv6 { t.logAndAdvance(t.status.Offset_sk_buff_head, GuessNotApplicable) return t.setReadyState(mp) - } else { //nolint:revive // TODO(NET) Fix revive linter - t.logAndAdvance(t.status.Offset_sk_buff_head, GuessDAddrIPv6) - break } + t.logAndAdvance(t.status.Offset_sk_buff_head, GuessDAddrIPv6) + break } t.status.Offset_sk_buff_head++ t.status.Offset_sk_buff_head, _ = skipOverlaps(t.status.Offset_sk_buff_head, t.skBuffRanges()) @@ -1088,8 +1085,6 @@ func acceptHandler(l net.Listener) { // responsible for the V4 offset guessing in kernel-space and 2) using it we can obtain // in user-space TCP socket information such as RTT and use it for setting the expected // values in the `fieldValues` struct. 
-// -//nolint:revive // TODO(NET) Fix revive linter func TcpGetInfo(conn net.Conn) (*unix.TCPInfo, error) { tcpConn, ok := conn.(*net.TCPConn) if !ok { @@ -1154,7 +1149,6 @@ func newUDPServer(addr string) (string, func(), error) { return ln.LocalAddr().String(), doneFn, nil } -//nolint:revive // TODO(NET) Fix revive linter var TracerOffsets tracerOffsets type tracerOffsets struct { diff --git a/pkg/network/tracer/offsetguess_test.go b/pkg/network/tracer/offsetguess_test.go index d05a3aaa12d8e..4ae13c0e8e0f8 100644 --- a/pkg/network/tracer/offsetguess_test.go +++ b/pkg/network/tracer/offsetguess_test.go @@ -307,7 +307,6 @@ func testOffsetGuess(t *testing.T) { } var offset uint64 - //nolint:revive // TODO(NET) Fix revive linter var name offsetT = o require.NoError(t, mp.Lookup(&name, &offset)) assert.Equal(t, offset, consts[o], "unexpected offset for %s", o) diff --git a/pkg/network/tracer/testutil/tcp.go b/pkg/network/tracer/testutil/tcp.go index aae58b8ca870b..9ef69afc27172 100644 --- a/pkg/network/tracer/testutil/tcp.go +++ b/pkg/network/tracer/testutil/tcp.go @@ -5,7 +5,7 @@ //go:build test -// Package testutil is test utilities for testing the network tracer +// Package testutil has utilities for testing the network tracer package testutil import "net" diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index 55faac7817df1..fded4ce4d70c0 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -5,6 +5,7 @@ //go:build linux_bpf +// Package tracer implements the functionality of the network tracer package tracer import ( diff --git a/pkg/network/tracer/tracer_unsupported.go b/pkg/network/tracer/tracer_unsupported.go index 546a74ccbfabc..bdb6abdf3dbf5 100644 --- a/pkg/network/tracer/tracer_unsupported.go +++ b/pkg/network/tracer/tracer_unsupported.go @@ -5,7 +5,7 @@ //go:build (linux && !linux_bpf) || (windows && !npm) || (!linux && !windows) -//nolint:revive // TODO(NET) Fix revive linter +// Package tracer implements 
the functionality of the network tracer package tracer import ( diff --git a/pkg/network/types/connection_key.go b/pkg/network/types/connection_key.go index 0d830cb7be8b4..dad5346f64107 100644 --- a/pkg/network/types/connection_key.go +++ b/pkg/network/types/connection_key.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(NET) Fix revive linter +// Package types implements the definition for network connection tuples package types import ( diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 1dc52652f4ad0..26e01d0677061 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -408,11 +408,10 @@ func batchConnections( continue } - //nolint:revive // TODO(NET) Fix revive linter - new := int32(len(newRouteIndices)) - newRouteIndices[c.RouteIdx] = new + newIdx := int32(len(newRouteIndices)) + newRouteIndices[c.RouteIdx] = newIdx batchRoutes = append(batchRoutes, routes[c.RouteIdx]) - c.RouteIdx = new + c.RouteIdx = newIdx } // EncodeDomainDatabase will take the namedb (a simple slice of strings with each unique diff --git a/pkg/process/util/address.go b/pkg/process/util/address.go index 56db100433050..15dde9f118d23 100644 --- a/pkg/process/util/address.go +++ b/pkg/process/util/address.go @@ -3,7 +3,6 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(NET) Fix revive linter package util import ( diff --git a/pkg/process/util/containers/containers.go b/pkg/process/util/containers/containers.go index 596d8fa9a1ab4..fd66a93ae0af4 100644 --- a/pkg/process/util/containers/containers.go +++ b/pkg/process/util/containers/containers.go @@ -119,7 +119,7 @@ func (p *containerProvider) GetContainers(cacheValidity time.Duration, previousC continue } - entityID := containers.BuildTaggerEntityName(container.ID) + entityID := types.NewEntityID(types.ContainerID, container.ID).String() tags, err := tagger.Tag(entityID, types.HighCardinality) if err != nil { log.Debugf("Could not collect tags for container %q, err: %v", container.ID[:12], err) diff --git a/pkg/process/util/containers/containers_test.go b/pkg/process/util/containers/containers_test.go index 316b237a4858f..fe9ceb0804aa8 100644 --- a/pkg/process/util/containers/containers_test.go +++ b/pkg/process/util/containers/containers_test.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" @@ -105,7 +106,7 @@ func TestGetContainers(t *testing.T) { MemoryRequest: pointer.Ptr[uint64](300), }, }) - fakeTagger.SetTags(containers.BuildTaggerEntityName("cID1"), "fake", []string{"low:common"}, []string{"orch:orch1"}, []string{"id:container1"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "cID1").String(), "fake", []string{"low:common"}, []string{"orch:orch1"}, []string{"id:container1"}, nil) // cID2 not running metadataProvider.Set(&workloadmeta.Container{ @@ -147,7 +148,7 @@ func TestGetContainers(t *testing.T) { RepoDigest: 
"sha256:378e0fa5bc50e6707ec9eb03c511cc6a2a4741f0c345d88dedb2fb9247b19f94", }, }) - fakeTagger.SetTags(containers.BuildTaggerEntityName("cID3"), "fake", []string{"low:common"}, []string{"orch:orch1"}, []string{"id:container3"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "cID3").String(), "fake", []string{"low:common"}, []string{"orch:orch1"}, []string{"id:container3"}, nil) // cID4 missing tags cID4Metrics := mock.GetFullSampleContainerEntry() @@ -310,7 +311,7 @@ func TestGetContainers(t *testing.T) { ID: "pod7", }, }) - fakeTagger.SetTags(containers.BuildTaggerEntityName("cID7"), "fake", []string{"low:common"}, []string{"orch:orch7"}, []string{"id:container7"}, nil) + fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "cID7").String(), "fake", []string{"low:common"}, []string{"orch:orch7"}, []string{"id:container7"}, nil) // // Running and checking diff --git a/pkg/process/util/util.go b/pkg/process/util/util.go index a5e6d572332ea..3b1d435584abc 100644 --- a/pkg/process/util/util.go +++ b/pkg/process/util/util.go @@ -3,6 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. +// Package util contains helper functions for processes, IP addresses, env variables, etc. 
package util import ( diff --git a/pkg/proto/datadog/remoteconfig/remoteconfig.proto b/pkg/proto/datadog/remoteconfig/remoteconfig.proto index 604557566c9e4..da9ac3f9f0dd6 100644 --- a/pkg/proto/datadog/remoteconfig/remoteconfig.proto +++ b/pkg/proto/datadog/remoteconfig/remoteconfig.proto @@ -121,6 +121,7 @@ message PackageState { string experiment_version = 3; string stable_config_version = 5; string experiment_config_version = 6; + string remote_config_version = 7; PackageStateTask task = 4; } diff --git a/pkg/proto/pbgo/core/remoteconfig.pb.go b/pkg/proto/pbgo/core/remoteconfig.pb.go index 5dbc3b04ca049..47c309f3718a5 100644 --- a/pkg/proto/pbgo/core/remoteconfig.pb.go +++ b/pkg/proto/pbgo/core/remoteconfig.pb.go @@ -1097,6 +1097,7 @@ type PackageState struct { ExperimentVersion string `protobuf:"bytes,3,opt,name=experiment_version,json=experimentVersion,proto3" json:"experiment_version,omitempty"` StableConfigVersion string `protobuf:"bytes,5,opt,name=stable_config_version,json=stableConfigVersion,proto3" json:"stable_config_version,omitempty"` ExperimentConfigVersion string `protobuf:"bytes,6,opt,name=experiment_config_version,json=experimentConfigVersion,proto3" json:"experiment_config_version,omitempty"` + RemoteConfigVersion string `protobuf:"bytes,7,opt,name=remote_config_version,json=remoteConfigVersion,proto3" json:"remote_config_version,omitempty"` Task *PackageStateTask `protobuf:"bytes,4,opt,name=task,proto3" json:"task,omitempty"` } @@ -1167,6 +1168,13 @@ func (x *PackageState) GetExperimentConfigVersion() string { return "" } +func (x *PackageState) GetRemoteConfigVersion() string { + if x != nil { + return x.RemoteConfigVersion + } + return "" +} + func (x *PackageState) GetTask() *PackageStateTask { if x != nil { return x.Task @@ -2143,7 +2151,7 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x12, 0x38, 0x0a, 0x08, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 
0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x08, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x22, 0xa4, 0x02, 0x0a, 0x0c, 0x50, + 0x52, 0x08, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x0c, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, @@ -2158,153 +2166,156 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x3a, 0x0a, 0x19, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x04, 0x74, - 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x04, 0x74, 0x61, 0x73, - 0x6b, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 
0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x39, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x70, - 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, 0x02, 0x0a, 0x0b, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x6f, - 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x72, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 
0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x61, 0x73, 0x5f, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x68, 0x61, 0x73, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x48, 0x0a, 0x0e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c, - 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x12, 0x0a, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, - 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, 0x61, 0x73, 
0x68, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, - 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, - 0x65, 
0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x5a, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x0e, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 
0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x66, 0x0a, - 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x34, 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x04, 0x74, 0x61, 0x73, 0x6b, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x39, 0x0a, 0x09, + 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 
0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, 0x02, + 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, + 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x68, + 0x61, 0x73, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x68, 0x61, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, + 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x22, 0x48, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x68, 0x61, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, 0x0e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x22, 0x99, 0x01, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x06, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x13, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xaa, 0x01, 0x0a, + 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6f, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, 0x46, 0x69, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 
0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x60, 0x0a, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5d, 
0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, - 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, - 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x12, 0x1a, - 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x14, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x5f, - 0x76, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x52, 0x12, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x56, 0x31, 0x2a, - 0x4a, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x44, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, - 0x0d, 0x49, 
0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x03, - 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x42, 0x15, 0x5a, 0x13, 0x70, - 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, - 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0d, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x66, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, + 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x31, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, + 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, + 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, + 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x61, 0x70, 0x70, 0x56, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, 0x10, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x53, + 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x73, 0x5f, 0x76, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x52, + 0x12, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x56, 0x31, 0x2a, 0x4a, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x08, 0x0a, 0x04, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, + 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, + 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x42, + 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, + 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/core/remoteconfig_gen.go 
b/pkg/proto/pbgo/core/remoteconfig_gen.go index 9085603741456..8eb8f8e403113 100644 --- a/pkg/proto/pbgo/core/remoteconfig_gen.go +++ b/pkg/proto/pbgo/core/remoteconfig_gen.go @@ -3049,9 +3049,9 @@ func (z OrgStatusResponse) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *PackageState) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 6 + // map header, size 7 // string "Package" - o = append(o, 0x86, 0xa7, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65) + o = append(o, 0x87, 0xa7, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65) o = msgp.AppendString(o, z.Package) // string "StableVersion" o = append(o, 0xad, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) @@ -3065,6 +3065,9 @@ func (z *PackageState) MarshalMsg(b []byte) (o []byte, err error) { // string "ExperimentConfigVersion" o = append(o, 0xb7, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) o = msgp.AppendString(o, z.ExperimentConfigVersion) + // string "RemoteConfigVersion" + o = append(o, 0xb3, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.RemoteConfigVersion) // string "Task" o = append(o, 0xa4, 0x54, 0x61, 0x73, 0x6b) if z.Task == nil { @@ -3127,6 +3130,12 @@ func (z *PackageState) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ExperimentConfigVersion") return } + case "RemoteConfigVersion": + z.RemoteConfigVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RemoteConfigVersion") + return + } case "Task": if msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts) @@ -3158,7 +3167,7 @@ func (z *PackageState) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z 
*PackageState) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Package) + 14 + msgp.StringPrefixSize + len(z.StableVersion) + 18 + msgp.StringPrefixSize + len(z.ExperimentVersion) + 20 + msgp.StringPrefixSize + len(z.StableConfigVersion) + 24 + msgp.StringPrefixSize + len(z.ExperimentConfigVersion) + 5 + s = 1 + 8 + msgp.StringPrefixSize + len(z.Package) + 14 + msgp.StringPrefixSize + len(z.StableVersion) + 18 + msgp.StringPrefixSize + len(z.ExperimentVersion) + 20 + msgp.StringPrefixSize + len(z.StableConfigVersion) + 24 + msgp.StringPrefixSize + len(z.ExperimentConfigVersion) + 20 + msgp.StringPrefixSize + len(z.RemoteConfigVersion) + 5 if z.Task == nil { s += msgp.NilSize } else { diff --git a/pkg/proto/pbgo/trace/span_gen.go b/pkg/proto/pbgo/trace/span_gen.go index f08192b31b01b..c1b148714d3ce 100644 --- a/pkg/proto/pbgo/trace/span_gen.go +++ b/pkg/proto/pbgo/trace/span_gen.go @@ -273,7 +273,7 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Metrics") return } - if z.Metrics == nil && zb0003 > 0{ + if z.Metrics == nil && zb0003 > 0 { z.Metrics = make(map[string]float64, zb0003) } else if len(z.Metrics) > 0 { for key := range z.Metrics { diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index afd53efc39b81..c582e4a777763 100644 --- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -29,17 +29,18 @@ import ( // RuntimeSecurityAgent represents the main wrapper for the Runtime Security product type RuntimeSecurityAgent struct { - hostname string - reporter common.RawReporter - client *RuntimeSecurityClient - running *atomic.Bool - wg sync.WaitGroup - connected *atomic.Bool - eventReceived *atomic.Uint64 - activityDumpReceived *atomic.Uint64 - telemetry *telemetry - endpoints *config.Endpoints - cancel context.CancelFunc + hostname string + reporter common.RawReporter + client *RuntimeSecurityClient + running *atomic.Bool + wg sync.WaitGroup + connected *atomic.Bool + 
eventReceived *atomic.Uint64 + activityDumpReceived *atomic.Uint64 + telemetry *telemetry + profContainersTelemetry *profContainersTelemetry + endpoints *config.Endpoints + cancel context.CancelFunc // activity dump storage *dump.ActivityDumpStorageManager @@ -73,6 +74,11 @@ func (rsa *RuntimeSecurityAgent) Start(reporter common.RawReporter, endpoints *c // Send Runtime Security Agent telemetry go rsa.telemetry.run(ctx) } + + if rsa.profContainersTelemetry != nil { + // Send Profiled Containers telemetry + go rsa.profContainersTelemetry.run(ctx) + } } // Stop the runtime recurity agent @@ -187,7 +193,7 @@ func (rsa *RuntimeSecurityAgent) DispatchActivityDump(msg *api.ActivityDumpStrea if rsa.telemetry != nil { // register for telemetry for this container imageName, imageTag := dump.GetImageNameTag() - rsa.telemetry.registerProfiledContainer(imageName, imageTag) + rsa.profContainersTelemetry.registerProfiledContainer(imageName, imageTag) raw := bytes.NewBuffer(msg.GetData()) diff --git a/pkg/security/agent/agent_nix.go b/pkg/security/agent/agent_nix.go index 81c6261704984..f7c537a7d7525 100644 --- a/pkg/security/agent/agent_nix.go +++ b/pkg/security/agent/agent_nix.go @@ -25,10 +25,16 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin } // on windows do no telemetry - telemetry, err := newTelemetry(statsdClient, wmeta, opts.LogProfiledWorkloads, opts.IgnoreDDAgentContainers) + telemetry, err := newTelemetry(statsdClient, wmeta, opts.IgnoreDDAgentContainers) if err != nil { return nil, errors.New("failed to initialize the telemetry reporter") } + + profContainersTelemetry, err := newProfContainersTelemetry(statsdClient, wmeta, opts.LogProfiledWorkloads) + if err != nil { + return nil, errors.New("failed to initialize the profiled containers telemetry reporter") + } + // on windows do no storage manager storage, err := dump.NewAgentStorageManager() if err != nil { @@ -36,13 +42,14 @@ func NewRuntimeSecurityAgent(statsdClient 
statsd.ClientInterface, hostname strin } return &RuntimeSecurityAgent{ - client: client, - hostname: hostname, - telemetry: telemetry, - storage: storage, - running: atomic.NewBool(false), - connected: atomic.NewBool(false), - eventReceived: atomic.NewUint64(0), - activityDumpReceived: atomic.NewUint64(0), + client: client, + hostname: hostname, + telemetry: telemetry, + profContainersTelemetry: profContainersTelemetry, + storage: storage, + running: atomic.NewBool(false), + connected: atomic.NewBool(false), + eventReceived: atomic.NewUint64(0), + activityDumpReceived: atomic.NewUint64(0), }, nil } diff --git a/pkg/security/agent/prof_containers_telemetry.go b/pkg/security/agent/prof_containers_telemetry.go new file mode 100644 index 0000000000000..0f73a4868cbf9 --- /dev/null +++ b/pkg/security/agent/prof_containers_telemetry.go @@ -0,0 +1,134 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package agent + +import ( + "context" + "errors" + "fmt" + "time" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/security/metrics" + "github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-go/v5/statsd" +) + +type profContainersTelemetry struct { + statsdClient statsd.ClientInterface + wmeta workloadmeta.Component + runtimeSecurityClient *RuntimeSecurityClient + profiledContainers map[profiledContainer]struct{} + logProfiledWorkloads bool +} + +func newProfContainersTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component, logProfiledWorkloads bool) (*profContainersTelemetry, error) { + runtimeSecurityClient, err := NewRuntimeSecurityClient() + if err != nil { + return nil, err + } + + return &profContainersTelemetry{ + statsdClient: statsdClient, + wmeta: wmeta, + runtimeSecurityClient: runtimeSecurityClient, + profiledContainers: make(map[profiledContainer]struct{}), + logProfiledWorkloads: logProfiledWorkloads, + }, nil +} + +func (t *profContainersTelemetry) registerProfiledContainer(name, tag string) { + entry := profiledContainer{ + name: name, + tag: tag, + } + + if entry.isValid() { + t.profiledContainers[entry] = struct{}{} + } +} + +func (t *profContainersTelemetry) run(ctx context.Context) { + log.Info("started collecting Profiled Containers telemetry") + defer log.Info("stopping Profiled Containers telemetry") + + profileCounterTicker := time.NewTicker(5 * time.Minute) + defer profileCounterTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-profileCounterTicker.C: + if err := t.reportProfiledContainers(); err != nil { + log.Debugf("couldn't report profiled containers: %v", err) + } + } + } +} + +type profiledContainer struct { + name string + tag string +} + +func (pc *profiledContainer) isValid() bool { + return pc.name != "" && pc.tag 
!= "" +} + +func (t *profContainersTelemetry) fetchConfig() (*api.SecurityConfigMessage, error) { + cfg, err := t.runtimeSecurityClient.GetConfig() + if err != nil { + return cfg, errors.New("couldn't fetch config from runtime security module") + } + return cfg, nil +} + +func (t *profContainersTelemetry) reportProfiledContainers() error { + cfg, err := t.fetchConfig() + if err != nil { + return err + } + if !cfg.ActivityDumpEnabled { + return nil + } + + profiled := make(map[profiledContainer]bool) + + runningContainers := t.wmeta.ListContainersWithFilter(workloadmeta.GetRunningContainers) + for _, container := range runningContainers { + entry := profiledContainer{ + name: container.Image.Name, + tag: container.Image.Tag, + } + if !entry.isValid() { + continue + } + profiled[entry] = false + } + + doneProfiling := make([]string, 0) + for containerEntry := range t.profiledContainers { + profiled[containerEntry] = true + doneProfiling = append(doneProfiling, fmt.Sprintf("%s:%s", containerEntry.name, containerEntry.tag)) + } + + missing := make([]string, 0, len(profiled)) + for entry, found := range profiled { + if !found { + missing = append(missing, fmt.Sprintf("%s:%s", entry.name, entry.tag)) + } + } + + if t.logProfiledWorkloads && len(missing) > 0 { + log.Infof("not yet profiled workloads (%d/%d): %v; finished profiling: %v", len(missing), len(profiled), missing, doneProfiling) + } + _ = t.statsdClient.Gauge(metrics.MetricActivityDumpNotYetProfiledWorkload, float64(len(missing)), nil, 1.0) + return nil +} diff --git a/pkg/security/agent/telemetry_linux.go b/pkg/security/agent/telemetry_linux.go index ed00085994a08..abd606a35b400 100644 --- a/pkg/security/agent/telemetry_linux.go +++ b/pkg/security/agent/telemetry_linux.go @@ -9,7 +9,6 @@ package agent import ( "context" "errors" - "fmt" "os" "time" @@ -25,11 +24,9 @@ import ( type telemetry struct { containers *sectelemetry.ContainersTelemetry runtimeSecurityClient *RuntimeSecurityClient - profiledContainers 
map[profiledContainer]struct{} - logProfiledWorkloads bool } -func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component, logProfiledWorkloads, ignoreDDAgentContainers bool) (*telemetry, error) { +func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component, ignoreDDAgentContainers bool) (*telemetry, error) { runtimeSecurityClient, err := NewRuntimeSecurityClient() if err != nil { return nil, err @@ -45,30 +42,15 @@ func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Compon return &telemetry{ containers: containersTelemetry, runtimeSecurityClient: runtimeSecurityClient, - profiledContainers: make(map[profiledContainer]struct{}), - logProfiledWorkloads: logProfiledWorkloads, }, nil } -func (t *telemetry) registerProfiledContainer(name, tag string) { - entry := profiledContainer{ - name: name, - tag: tag, - } - - if entry.isValid() { - t.profiledContainers[entry] = struct{}{} - } -} - func (t *telemetry) run(ctx context.Context) { log.Info("started collecting Runtime Security Agent telemetry") defer log.Info("stopping Runtime Security Agent telemetry") metricsTicker := time.NewTicker(1 * time.Minute) defer metricsTicker.Stop() - profileCounterTicker := time.NewTicker(5 * time.Minute) - defer profileCounterTicker.Stop() for { select { @@ -78,23 +60,10 @@ func (t *telemetry) run(ctx context.Context) { if err := t.reportContainers(); err != nil { log.Debugf("couldn't report containers: %v", err) } - case <-profileCounterTicker.C: - if err := t.reportProfiledContainers(); err != nil { - log.Debugf("couldn't report profiled containers: %v", err) - } } } } -type profiledContainer struct { - name string - tag string -} - -func (pc *profiledContainer) isValid() bool { - return pc.name != "" && pc.tag != "" -} - func (t *telemetry) fetchConfig() (*api.SecurityConfigMessage, error) { cfg, err := t.runtimeSecurityClient.GetConfig() if err != nil { @@ -103,48 +72,6 @@ func (t *telemetry) fetchConfig() 
(*api.SecurityConfigMessage, error) { return cfg, nil } -func (t *telemetry) reportProfiledContainers() error { - cfg, err := t.fetchConfig() - if err != nil { - return err - } - if !cfg.ActivityDumpEnabled { - return nil - } - - profiled := make(map[profiledContainer]bool) - - for _, container := range t.containers.ListRunningContainers() { - entry := profiledContainer{ - name: container.Image.Name, - tag: container.Image.Tag, - } - if !entry.isValid() { - continue - } - profiled[entry] = false - } - - doneProfiling := make([]string, 0) - for containerEntry := range t.profiledContainers { - profiled[containerEntry] = true - doneProfiling = append(doneProfiling, fmt.Sprintf("%s:%s", containerEntry.name, containerEntry.tag)) - } - - missing := make([]string, 0, len(profiled)) - for entry, found := range profiled { - if !found { - missing = append(missing, fmt.Sprintf("%s:%s", entry.name, entry.tag)) - } - } - - if t.logProfiledWorkloads && len(missing) > 0 { - log.Infof("not yet profiled workloads (%d/%d): %v; finished profiling: %v", len(missing), len(profiled), missing, doneProfiling) - } - t.containers.TelemetrySender.Gauge(metrics.MetricActivityDumpNotYetProfiledWorkload, float64(len(missing)), nil) - return nil -} - func (t *telemetry) reportContainers() error { // retrieve the runtime security module config cfg, err := t.fetchConfig() diff --git a/pkg/security/agent/telemetry_others.go b/pkg/security/agent/telemetry_others.go index aeb59d282624e..d36f3e1aba54b 100644 --- a/pkg/security/agent/telemetry_others.go +++ b/pkg/security/agent/telemetry_others.go @@ -12,6 +12,10 @@ import "context" type telemetry struct{} -func (t *telemetry) registerProfiledContainer(_, _ string) {} - func (t *telemetry) run(_ context.Context) {} + +type profContainersTelemetry struct{} + +func (t *profContainersTelemetry) registerProfiledContainer(_, _ string) {} + +func (t *profContainersTelemetry) run(_ context.Context) {} diff --git a/pkg/security/config/config.go 
b/pkg/security/config/config.go index 596ba79300085..630c3a952ce10 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -235,6 +235,8 @@ type RuntimeSecurityConfig struct { // Enforcement capabilities EnforcementEnabled bool EnforcementRawSyscallEnabled bool + EnforcementBinaryExcluded []string + EnforcementRuleSourceAllowed []string //WindowsFilenameCacheSize is the max number of filenames to cache WindowsFilenameCacheSize int @@ -415,7 +417,9 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { // enforcement EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), + EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), + EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), // User Sessions UserSessionsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), diff --git a/pkg/security/ebpf/c/include/constants/custom.h b/pkg/security/ebpf/c/include/constants/custom.h index 6d599afdc3869..181460e586ce1 100644 --- a/pkg/security/ebpf/c/include/constants/custom.h +++ b/pkg/security/ebpf/c/include/constants/custom.h @@ -12,7 +12,6 @@ #define MAX_PATH_LEN 256 #define REVISION_ARRAY_SIZE 4096 #define INODE_DISCARDER_TYPE 0 -#define PID_DISCARDER_TYPE 1 #define BASENAME_APPROVER_TYPE 0 #define FLAG_APPROVER_TYPE 1 diff --git a/pkg/security/ebpf/c/include/constants/enums.h b/pkg/security/ebpf/c/include/constants/enums.h index a48a6c45ba883..859a3230465c1 100644 --- a/pkg/security/ebpf/c/include/constants/enums.h +++ b/pkg/security/ebpf/c/include/constants/enums.h @@ -184,13 +184,13 @@ enum erpc_op { UNKNOWN_OP, DISCARD_INODE_OP, - DISCARD_PID_OP, + DISCARD_PID_OP, // DEPRECATED 
RESOLVE_SEGMENT_OP, // DEPRECATED RESOLVE_PATH_OP, RESOLVE_PARENT_OP, // DEPRECATED REGISTER_SPAN_TLS_OP, // can be used outside of the CWS, do not change the value EXPIRE_INODE_DISCARDER_OP, - EXPIRE_PID_DISCARDER_OP, + EXPIRE_PID_DISCARDER_OP, // DEPRECATED BUMP_DISCARDERS_REVISION, GET_RINGBUF_USAGE, USER_SESSION_CONTEXT_OP, diff --git a/pkg/security/ebpf/c/include/helpers/approvers.h b/pkg/security/ebpf/c/include/helpers/approvers.h index 5078525a4108e..dedc9b36772dd 100644 --- a/pkg/security/ebpf/c/include/helpers/approvers.h +++ b/pkg/security/ebpf/c/include/helpers/approvers.h @@ -38,10 +38,7 @@ int __attribute__((always_inline)) approve_by_basename(struct dentry *dentry, u6 } int __attribute__((always_inline)) basename_approver(struct syscall_cache_t *syscall, struct dentry *dentry, u64 event_type) { - if ((syscall->policy.flags & BASENAME) > 0) { - return approve_by_basename(dentry, event_type); - } - return 0; + return approve_by_basename(dentry, event_type); } int __attribute__((always_inline)) chmod_approvers(struct syscall_cache_t *syscall) { @@ -79,11 +76,11 @@ int __attribute__((always_inline)) approve_mmap_by_protection(struct syscall_cac int __attribute__((always_inline)) mmap_approvers(struct syscall_cache_t *syscall) { int pass_to_userspace = 0; - if ((syscall->policy.flags & BASENAME) > 0 && syscall->mmap.dentry != NULL) { + if (syscall->mmap.dentry != NULL) { pass_to_userspace = approve_by_basename(syscall->mmap.dentry, EVENT_MMAP); } - if (!pass_to_userspace && (syscall->policy.flags & FLAGS) > 0) { + if (!pass_to_userspace) { pass_to_userspace = approve_mmap_by_protection(syscall); if (!pass_to_userspace) { pass_to_userspace = approve_mmap_by_flags(syscall); @@ -127,13 +124,9 @@ int __attribute__((always_inline)) approve_mprotect_by_req_protection(struct sys } int __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; - - if ((syscall->policy.flags & FLAGS) > 0) { - pass_to_userspace 
= approve_mprotect_by_vm_protection(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_mprotect_by_req_protection(syscall); - } + int pass_to_userspace = approve_mprotect_by_vm_protection(syscall); + if (!pass_to_userspace) { + pass_to_userspace = approve_mprotect_by_req_protection(syscall); } return pass_to_userspace; @@ -160,13 +153,8 @@ int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *sysc } int __attribute__((always_inline)) open_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; - - if ((syscall->policy.flags & BASENAME) > 0) { - pass_to_userspace = approve_by_basename(syscall->open.dentry, EVENT_OPEN); - } - - if (!pass_to_userspace && (syscall->policy.flags & FLAGS) > 0) { + int pass_to_userspace = approve_by_basename(syscall->open.dentry, EVENT_OPEN); + if (!pass_to_userspace) { pass_to_userspace = approve_by_flags(syscall); } @@ -205,11 +193,11 @@ int __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_c int __attribute__((always_inline)) splice_approvers(struct syscall_cache_t *syscall) { int pass_to_userspace = 0; - if ((syscall->policy.flags & BASENAME) > 0 && syscall->splice.dentry != NULL) { + if (syscall->splice.dentry != NULL) { pass_to_userspace = approve_by_basename(syscall->splice.dentry, EVENT_SPLICE); } - if (!pass_to_userspace && (syscall->policy.flags & FLAGS) > 0) { + if (!pass_to_userspace) { pass_to_userspace = approve_splice_by_exit_flags(syscall); if (!pass_to_userspace) { pass_to_userspace = approve_splice_by_entry_flags(syscall); @@ -229,14 +217,12 @@ int __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *sysca int __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { int pass_to_userspace = 0; + u32 key = 0; - if ((syscall->policy.flags & FLAGS) > 0) { - u32 key = 0; - u64 *cmd_bitmask = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); - if (cmd_bitmask != NULL && ((1 << syscall->bpf.cmd) & 
*cmd_bitmask) > 0) { - monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - pass_to_userspace = 1; - } + u64 *cmd_bitmask = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); + if (cmd_bitmask != NULL && ((1 << syscall->bpf.cmd) & *cmd_bitmask) > 0) { + monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); + pass_to_userspace = 1; } return pass_to_userspace; diff --git a/pkg/security/ebpf/c/include/helpers/discarders.h b/pkg/security/ebpf/c/include/helpers/discarders.h index 6db98d996cfa2..86bc5d3a1c96d 100644 --- a/pkg/security/ebpf/c/include/helpers/discarders.h +++ b/pkg/security/ebpf/c/include/helpers/discarders.h @@ -283,108 +283,4 @@ int __attribute__((always_inline)) expire_inode_discarders(u32 mount_id, u64 ino return 0; } -int __attribute__((always_inline)) expire_pid_discarder(u32 tgid); - -int __attribute__((always_inline)) discard_pid(u64 event_type, u32 tgid, u64 timeout) { - struct pid_discarder_t key = { - .tgid = tgid, - }; - - u64 now = bpf_ktime_get_ns(); - - u64 *discarder_timestamp; - u64 timestamp = timeout ? 
now + timeout : 0; - - u32 revision = get_discarders_revision(); - - struct pid_discarder_params_t *pid_params = bpf_map_lookup_elem(&pid_discarders, &key); - if (pid_params) { - if (!pid_params->params.is_retained && pid_params->params.revision != revision) { - return expire_pid_discarder(tgid); - } - - // either the discarder is not retained or its expiration period is already over - if (!pid_params->params.is_retained || pid_params->params.expire_at < now) { - pid_params->params.is_retained = 0; - - // the revision change, all the discarders are invalidated, - // we need to add only the current event type and to use the current revision - if (pid_params->params.revision != revision) { - pid_params->params.event_mask = 0; - pid_params->params.revision = revision; - } - add_event_to_mask(&pid_params->params.event_mask, event_type); - - if ((discarder_timestamp = get_discarder_timestamp(&pid_params->params, event_type)) != NULL) { - *discarder_timestamp = timestamp; - } - } - } else { - struct pid_discarder_params_t new_pid_params = { - .params.revision = revision, - }; - add_event_to_mask(&new_pid_params.params.event_mask, event_type); - - if ((discarder_timestamp = get_discarder_timestamp(&new_pid_params.params, event_type)) != NULL) { - *discarder_timestamp = timestamp; - } - bpf_map_update_elem(&pid_discarders, &key, &new_pid_params, BPF_NOEXIST); - } - - monitor_discarder_added(EVENT_ANY); - - return 0; -} - -int __attribute__((always_inline)) is_discarded_by_pid(u64 event_type, u32 tgid) { - struct pid_discarder_t key = { - .tgid = tgid, - }; - - struct pid_discarder_params_t *pid_params = (struct pid_discarder_params_t *)is_discarded(&pid_discarders, &key, event_type, bpf_ktime_get_ns()); - if (!pid_params) { - return NOT_DISCARDED; - } - - u32 revision = get_discarders_revision(); - if (pid_params->params.revision != revision) { - return NOT_DISCARDED; - } - - return DISCARDED; -} - -int __attribute__((always_inline)) is_discarded_by_process(const char 
mode, u64 event_type) { - u64 pid_tgid = bpf_get_current_pid_tgid(); - u32 tgid = pid_tgid >> 32; - - if (is_runtime_discarded() && is_runtime_request()) { - return 1; - } - - if (mode != NO_FILTER && is_discarded_by_pid(event_type, tgid)) { - return 1; - } - - return 0; -} - -int __attribute__((always_inline)) expire_pid_discarder(u32 tgid) { - if (!tgid) { - return 0; - } - - struct pid_discarder_t key = { - .tgid = tgid, - }; - - struct discarder_params_t *params = bpf_map_lookup_elem(&pid_discarders, &key); - if (params) { - params->is_retained = 1; - params->expire_at = bpf_ktime_get_ns() + get_discarder_retention(); - } - - return 0; -} - #endif diff --git a/pkg/security/ebpf/c/include/helpers/erpc.h b/pkg/security/ebpf/c/include/helpers/erpc.h index a06a8591c3fa0..773f00f5fb108 100644 --- a/pkg/security/ebpf/c/include/helpers/erpc.h +++ b/pkg/security/ebpf/c/include/helpers/erpc.h @@ -36,30 +36,6 @@ int __attribute__((always_inline)) handle_expire_inode_discarder(void *data) { return 0; } -int __attribute__((always_inline)) handle_discard_pid(void *data) { - if (!is_runtime_request()) { - return 0; - } - - struct discard_pid_t discarder; - bpf_probe_read(&discarder, sizeof(discarder), data); - - return discard_pid(discarder.req.event_type, discarder.pid, discarder.req.timeout); -} - -int __attribute__((always_inline)) handle_expire_pid_discarder(void *data) { - if (!is_runtime_request()) { - return 0; - } - - u32 pid; - bpf_probe_read(&pid, sizeof(pid), data); - - expire_pid_discarder(pid); - - return 0; -} - int __attribute__((always_inline)) handle_bump_discarders_revision(void *data) { if (!is_runtime_request()) { return 0; @@ -116,8 +92,6 @@ int __attribute__((always_inline)) handle_erpc_request(ctx_t *ctx) { switch (op) { case DISCARD_INODE_OP: return handle_discard_inode(data); - case DISCARD_PID_OP: - return handle_discard_pid(data); case RESOLVE_PATH_OP: return handle_dr_request(ctx, data, DR_ERPC_KEY); case USER_SESSION_CONTEXT_OP: @@ -126,8 +100,6 
@@ int __attribute__((always_inline)) handle_erpc_request(ctx_t *ctx) { return handle_register_span_memory(data); case EXPIRE_INODE_DISCARDER_OP: return handle_expire_inode_discarder(data); - case EXPIRE_PID_DISCARDER_OP: - return handle_expire_pid_discarder(data); case BUMP_DISCARDERS_REVISION: return handle_bump_discarders_revision(data); #if USE_RING_BUFFER == 1 diff --git a/pkg/security/ebpf/c/include/hooks/bpf.h b/pkg/security/ebpf/c/include/hooks/bpf.h index 454b09d1af998..8b1c9f3bca35c 100644 --- a/pkg/security/ebpf/c/include/hooks/bpf.h +++ b/pkg/security/ebpf/c/include/hooks/bpf.h @@ -51,10 +51,6 @@ __attribute__((always_inline)) void send_bpf_event(void *ctx, struct syscall_cac HOOK_SYSCALL_ENTRY3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { struct policy_t policy = fetch_policy(EVENT_BPF); - if (is_discarded_by_process(policy.mode, EVENT_BPF)) { - return 0; - } - struct syscall_cache_t syscall = { .policy = policy, .type = EVENT_BPF, diff --git a/pkg/security/ebpf/c/include/hooks/chdir.h b/pkg/security/ebpf/c/include/hooks/chdir.h index 3c198a1c94026..b424ec706ddd1 100644 --- a/pkg/security/ebpf/c/include/hooks/chdir.h +++ b/pkg/security/ebpf/c/include/hooks/chdir.h @@ -9,10 +9,6 @@ long __attribute__((always_inline)) trace__sys_chdir(const char *path) { struct policy_t policy = fetch_policy(EVENT_CHDIR); - if (is_discarded_by_process(policy.mode, EVENT_CHDIR)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_CHDIR, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/chmod.h b/pkg/security/ebpf/c/include/hooks/chmod.h index eeac8e94be4d8..8afd2f1f663ab 100644 --- a/pkg/security/ebpf/c/include/hooks/chmod.h +++ b/pkg/security/ebpf/c/include/hooks/chmod.h @@ -8,10 +8,6 @@ int __attribute__((always_inline)) trace__sys_chmod(const char *path, umode_t mode) { struct policy_t policy = fetch_policy(EVENT_CHMOD); - if (is_discarded_by_process(policy.mode, EVENT_CHMOD)) { - return 0; - } - struct 
syscall_cache_t syscall = { .type = EVENT_CHMOD, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/chown.h b/pkg/security/ebpf/c/include/hooks/chown.h index 684882dd99fcc..248d366aef68c 100644 --- a/pkg/security/ebpf/c/include/hooks/chown.h +++ b/pkg/security/ebpf/c/include/hooks/chown.h @@ -8,10 +8,6 @@ int __attribute__((always_inline)) trace__sys_chown(const char *filename, uid_t user, gid_t group) { struct policy_t policy = fetch_policy(EVENT_CHOWN); - if (is_discarded_by_process(policy.mode, EVENT_CHOWN)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_CHOWN, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/exec.h b/pkg/security/ebpf/c/include/hooks/exec.h index ba86779c03284..c7ff9374bc2a9 100644 --- a/pkg/security/ebpf/c/include/hooks/exec.h +++ b/pkg/security/ebpf/c/include/hooks/exec.h @@ -291,8 +291,6 @@ int hook_do_exit(ctx_t *ctx) { // only send the exit event if this is the thread group leader that isn't being killed by an execing thread if (tgid == pid && pid_tgid_execing == NULL) { - expire_pid_discarder(tgid); - // update exit time struct pid_cache_t *pid_entry = (struct pid_cache_t *)bpf_map_lookup_elem(&pid_cache, &tgid); if (pid_entry) { diff --git a/pkg/security/ebpf/c/include/hooks/link.h b/pkg/security/ebpf/c/include/hooks/link.h index d697f6a37bd40..51a4a18ee88b2 100644 --- a/pkg/security/ebpf/c/include/hooks/link.h +++ b/pkg/security/ebpf/c/include/hooks/link.h @@ -9,10 +9,6 @@ int __attribute__((always_inline)) trace__sys_link(u8 async, const char *oldpath, const char *newpath) { struct policy_t policy = fetch_policy(EVENT_LINK); - if (is_discarded_by_process(policy.mode, EVENT_LINK)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_LINK, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/mkdir.h b/pkg/security/ebpf/c/include/hooks/mkdir.h index 506600db82ccd..0047972df52df 100644 --- a/pkg/security/ebpf/c/include/hooks/mkdir.h +++ 
b/pkg/security/ebpf/c/include/hooks/mkdir.h @@ -9,10 +9,6 @@ long __attribute__((always_inline)) trace__sys_mkdir(u8 async, umode_t mode) { struct policy_t policy = fetch_policy(EVENT_MKDIR); - if (is_discarded_by_process(policy.mode, EVENT_MKDIR)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_MKDIR, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/mmap.h b/pkg/security/ebpf/c/include/hooks/mmap.h index 0b3fb632a543d..45392650a94ca 100644 --- a/pkg/security/ebpf/c/include/hooks/mmap.h +++ b/pkg/security/ebpf/c/include/hooks/mmap.h @@ -14,10 +14,6 @@ int hook_vm_mmap_pgoff(ctx_t *ctx) { u64 flags = CTX_PARM5(ctx); struct policy_t policy = fetch_policy(EVENT_MMAP); - if (is_discarded_by_process(policy.mode, EVENT_MMAP)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_MMAP, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/module.h b/pkg/security/ebpf/c/include/hooks/module.h index 6e0e38af8ae23..905301cbeebf2 100644 --- a/pkg/security/ebpf/c/include/hooks/module.h +++ b/pkg/security/ebpf/c/include/hooks/module.h @@ -6,11 +6,6 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace_init_module(u32 loaded_from_memory, const char *uargs) { - struct policy_t policy = fetch_policy(EVENT_INIT_MODULE); - if (is_discarded_by_process(policy.mode, EVENT_INIT_MODULE)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_INIT_MODULE, .init_module = { @@ -161,11 +156,6 @@ HOOK_SYSCALL_EXIT(finit_module) { } HOOK_SYSCALL_ENTRY1(delete_module, const char *, name_user) { - struct policy_t policy = fetch_policy(EVENT_DELETE_MODULE); - if (is_discarded_by_process(policy.mode, EVENT_DELETE_MODULE)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_DELETE_MODULE, .delete_module = { diff --git a/pkg/security/ebpf/c/include/hooks/mprotect.h b/pkg/security/ebpf/c/include/hooks/mprotect.h index f93e0189ef503..0a6107c12c13b 100644 --- 
a/pkg/security/ebpf/c/include/hooks/mprotect.h +++ b/pkg/security/ebpf/c/include/hooks/mprotect.h @@ -8,10 +8,6 @@ HOOK_SYSCALL_ENTRY0(mprotect) { struct policy_t policy = fetch_policy(EVENT_MPROTECT); - if (is_discarded_by_process(policy.mode, EVENT_MPROTECT)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_MPROTECT, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/network/bind.h b/pkg/security/ebpf/c/include/hooks/network/bind.h index 5200ab731a7a0..1a84e3bc7feae 100644 --- a/pkg/security/ebpf/c/include/hooks/network/bind.h +++ b/pkg/security/ebpf/c/include/hooks/network/bind.h @@ -11,12 +11,6 @@ HOOK_SYSCALL_ENTRY3(bind, int, socket, struct sockaddr *, addr, unsigned int, ad return 0; } - struct policy_t policy = fetch_policy(EVENT_BIND); - if (is_discarded_by_process(policy.mode, EVENT_BIND)) { - return 0; - } - - /* cache the bind and wait to grab the retval to send it */ struct syscall_cache_t syscall = { .type = EVENT_BIND, }; diff --git a/pkg/security/ebpf/c/include/hooks/open.h b/pkg/security/ebpf/c/include/hooks/open.h index abc2981d5900f..3d48368979c93 100644 --- a/pkg/security/ebpf/c/include/hooks/open.h +++ b/pkg/security/ebpf/c/include/hooks/open.h @@ -12,10 +12,6 @@ int __attribute__((always_inline)) trace__sys_openat2(const char *path, u8 async, int flags, umode_t mode, u64 pid_tgid) { struct policy_t policy = fetch_policy(EVENT_OPEN); - if (is_discarded_by_process(policy.mode, EVENT_OPEN)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_OPEN, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/ptrace.h b/pkg/security/ebpf/c/include/hooks/ptrace.h index bb5d8015dd77b..358966d12c2ac 100644 --- a/pkg/security/ebpf/c/include/hooks/ptrace.h +++ b/pkg/security/ebpf/c/include/hooks/ptrace.h @@ -6,11 +6,6 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY3(ptrace, u32, request, pid_t, pid, void *, addr) { - struct policy_t policy = fetch_policy(EVENT_PTRACE); - if 
(is_discarded_by_process(policy.mode, EVENT_PTRACE)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_PTRACE, .ptrace = { diff --git a/pkg/security/ebpf/c/include/hooks/rename.h b/pkg/security/ebpf/c/include/hooks/rename.h index 41f29c4380da1..4cef2525162b7 100644 --- a/pkg/security/ebpf/c/include/hooks/rename.h +++ b/pkg/security/ebpf/c/include/hooks/rename.h @@ -94,11 +94,6 @@ int hook_vfs_rename(ctx_t *ctx) { return mark_as_discarded(syscall); } - // If we are discarded, we still want to invalidate the inode - if (is_discarded_by_process(syscall->policy.mode, EVENT_RENAME)) { - return mark_as_discarded(syscall); - } - // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = syscall->rename.src_dentry; syscall->resolver.key = syscall->rename.src_file.path_key; diff --git a/pkg/security/ebpf/c/include/hooks/rmdir.h b/pkg/security/ebpf/c/include/hooks/rmdir.h index e19c79cda00bb..71fc46ec73f05 100644 --- a/pkg/security/ebpf/c/include/hooks/rmdir.h +++ b/pkg/security/ebpf/c/include/hooks/rmdir.h @@ -87,10 +87,6 @@ int hook_security_inode_rmdir(ctx_t *ctx) { return 0; } - if (is_discarded_by_process(syscall->policy.mode, syscall->type)) { - return mark_as_discarded(syscall); - } - if (dentry != NULL) { syscall->resolver.key = key; syscall->resolver.dentry = dentry; diff --git a/pkg/security/ebpf/c/include/hooks/setxattr.h b/pkg/security/ebpf/c/include/hooks/setxattr.h index 06e29e4866566..f97a33e160f3b 100644 --- a/pkg/security/ebpf/c/include/hooks/setxattr.h +++ b/pkg/security/ebpf/c/include/hooks/setxattr.h @@ -8,10 +8,6 @@ int __attribute__((always_inline)) trace__sys_setxattr(const char *xattr_name) { struct policy_t policy = fetch_policy(EVENT_SETXATTR); - if (is_discarded_by_process(policy.mode, EVENT_SETXATTR)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_SETXATTR, .policy = policy, @@ -39,10 +35,6 @@ HOOK_SYSCALL_ENTRY2(fsetxattr, 
int, fd, const char *, name) { int __attribute__((always_inline)) trace__sys_removexattr(const char *xattr_name) { struct policy_t policy = fetch_policy(EVENT_REMOVEXATTR); - if (is_discarded_by_process(policy.mode, EVENT_REMOVEXATTR)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_REMOVEXATTR, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/signal.h b/pkg/security/ebpf/c/include/hooks/signal.h index 9c8e417a44072..8b6d11fb80206 100644 --- a/pkg/security/ebpf/c/include/hooks/signal.h +++ b/pkg/security/ebpf/c/include/hooks/signal.h @@ -6,17 +6,11 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY2(kill, int, pid, int, type) { - struct policy_t policy = fetch_policy(EVENT_SIGNAL); - if (is_discarded_by_process(policy.mode, EVENT_SIGNAL)) { - return 0; - } - /* TODO: implement the event for pid equal to 0 or -1. */ if (pid < 1) { return 0; } - /* cache the signal and wait to grab the retval to send it */ struct syscall_cache_t syscall = { .type = EVENT_SIGNAL, .signal = { diff --git a/pkg/security/ebpf/c/include/hooks/splice.h b/pkg/security/ebpf/c/include/hooks/splice.h index 4f168ee098b24..7dff0490400e7 100644 --- a/pkg/security/ebpf/c/include/hooks/splice.h +++ b/pkg/security/ebpf/c/include/hooks/splice.h @@ -10,10 +10,6 @@ HOOK_SYSCALL_ENTRY0(splice) { struct policy_t policy = fetch_policy(EVENT_SPLICE); - if (is_discarded_by_process(policy.mode, EVENT_SPLICE)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_SPLICE, .policy = policy, diff --git a/pkg/security/ebpf/c/include/hooks/unlink.h b/pkg/security/ebpf/c/include/hooks/unlink.h index 24c6546d0ec14..55f97e367494a 100644 --- a/pkg/security/ebpf/c/include/hooks/unlink.h +++ b/pkg/security/ebpf/c/include/hooks/unlink.h @@ -73,10 +73,6 @@ int hook_vfs_unlink(ctx_t *ctx) { return mark_as_discarded(syscall); } - if (is_discarded_by_process(syscall->policy.mode, EVENT_UNLINK)) { - return mark_as_discarded(syscall); - } - // the mount id of path_key is 
resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = dentry; syscall->resolver.key = syscall->unlink.file.path_key; diff --git a/pkg/security/ebpf/c/include/hooks/utimes.h b/pkg/security/ebpf/c/include/hooks/utimes.h index 482679ab114e2..88bdc8d44c8ae 100644 --- a/pkg/security/ebpf/c/include/hooks/utimes.h +++ b/pkg/security/ebpf/c/include/hooks/utimes.h @@ -7,10 +7,6 @@ int __attribute__((always_inline)) trace__sys_utimes(const char *filename) { struct policy_t policy = fetch_policy(EVENT_UTIME); - if (is_discarded_by_process(policy.mode, EVENT_UTIME)) { - return 0; - } - struct syscall_cache_t syscall = { .type = EVENT_UTIME, .policy = policy, diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index 880f20d6e5866..d472b54d64627 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -52,8 +52,6 @@ BPF_LRU_MAP(exec_pid_transfer, u32, u64, 512) BPF_LRU_MAP(netns_cache, u32, u32, 40960) BPF_LRU_MAP(span_tls, u32, struct span_tls_t, 4096) BPF_LRU_MAP(inode_discarders, struct inode_discarder_t, struct inode_discarder_params_t, 4096) -BPF_LRU_MAP(pid_discarders, u32, struct pid_discarder_params_t, 512) -BPF_LRU_MAP(pathnames, struct path_key_t, struct path_leaf_t, 1) // edited BPF_LRU_MAP(flow_pid, struct pid_route_t, u32, 10240) BPF_LRU_MAP(conntrack, struct namespaced_flow_t, struct namespaced_flow_t, 4096) BPF_LRU_MAP(io_uring_ctx_pid, void *, u64, 2048) @@ -68,6 +66,7 @@ BPF_LRU_MAP(user_sessions, struct user_session_key_t, struct user_session_t, 102 BPF_LRU_MAP_FLAGS(tasks_in_coredump, u64, u8, 64, BPF_F_NO_COMMON_LRU) BPF_LRU_MAP_FLAGS(syscalls, u64, struct syscall_cache_t, 1, BPF_F_NO_COMMON_LRU) // max entries will be overridden at runtime +BPF_LRU_MAP_FLAGS(pathnames, struct path_key_t, struct path_leaf_t, 1, BPF_F_NO_COMMON_LRU) // edited BPF_PERCPU_ARRAY_MAP(dr_erpc_state, struct dr_erpc_state_t, 1) 
BPF_PERCPU_ARRAY_MAP(cgroup_tracing_event_gen, struct cgroup_tracing_event_t, EVENT_GEN_SIZE) diff --git a/pkg/security/ebpf/c/include/structs/filter.h b/pkg/security/ebpf/c/include/structs/filter.h index 2c43b13b11580..724331aa84841 100644 --- a/pkg/security/ebpf/c/include/structs/filter.h +++ b/pkg/security/ebpf/c/include/structs/filter.h @@ -7,7 +7,6 @@ struct policy_t { char mode; - char flags; }; // Approvers @@ -45,14 +44,6 @@ struct inode_discarder_params_t { u32 mount_revision; }; -struct pid_discarder_params_t { - struct discarder_params_t params; -}; - -struct pid_discarder_t { - u32 tgid; -}; - struct inode_discarder_t { struct path_key_t path_key; u32 is_leaf; diff --git a/pkg/security/ebpf/probes/all.go b/pkg/security/ebpf/probes/all.go index 4f5bcdf92e175..931e0a5957f38 100644 --- a/pkg/security/ebpf/probes/all.go +++ b/pkg/security/ebpf/probes/all.go @@ -109,7 +109,6 @@ func AllMaps() []*manager.Map { // Filters {Name: "filter_policy"}, {Name: "inode_discarders"}, - {Name: "pid_discarders"}, {Name: "inode_disc_revisions"}, {Name: "basename_approvers"}, // Dentry resolver table diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 7ef404ea26e4f..6bd99c787014d 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -18204,6 +18204,13 @@ "uname_release": "4.14.35-2047.536.5.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.537.4.1.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -18211,6 +18218,27 @@ "uname_release": "4.14.35-2047.537.4.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.538.5.1.el7uek.aarch64", + "cindex": 89 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": 
"4.14.35-2047.538.5.el7uek.aarch64", + "cindex": 89 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.539.5.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -20752,6 +20780,27 @@ "uname_release": "4.1.12-124.86.1.el7uek.x86_64", "cindex": 94 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.87.2.2.el7uek.x86_64", + "cindex": 94 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.87.2.el7uek.x86_64", + "cindex": 94 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.88.3.el7uek.x86_64", + "cindex": 94 + }, { "distrib": "ol", "version": "7", @@ -23489,6 +23538,13 @@ "uname_release": "4.14.35-2047.537.3.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.537.4.1.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", @@ -23524,6 +23580,83 @@ "uname_release": "4.14.35-2047.538.4.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.538.5.1.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.538.5.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.539.1.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.539.2.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.539.3.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.539.4.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + 
"uname_release": "4.14.35-2047.539.5.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.1.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.2.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.3.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.4.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index e3b867ea3d04a..8adc92a9737a5 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -96,25 +96,6 @@ func marshalDiscardHeader(req *erpc.Request, eventType model.EventType, timeout return 16 } -//nolint:deadcode,unused -type pidDiscarders struct { - erpc *erpc.ERPC -} - -//nolint:deadcode,unused -func (p *pidDiscarders) discardWithTimeout(req *erpc.Request, eventType model.EventType, pid uint32, timeout int64) error { - req.OP = erpc.DiscardPidOp - offset := marshalDiscardHeader(req, eventType, uint64(timeout)) - binary.NativeEndian.PutUint32(req.Data[offset:offset+4], pid) - - return p.erpc.Request(req) -} - -//nolint:deadcode,unused -func newPidDiscarders(erpc *erpc.ERPC) *pidDiscarders { - return &pidDiscarders{erpc: erpc} -} - // InodeDiscarderMapEntry describes a map entry type InodeDiscarderMapEntry struct { PathKey model.PathKey @@ -135,11 +116,6 @@ type InodeDiscarderParams struct { Revision uint32 } -// PidDiscarderParams describes a map value -type PidDiscarderParams struct { - DiscarderParams `yaml:"params"` -} - // DiscarderParams describes a map value type DiscarderParams struct { EventMask uint64 `yaml:"event_mask"` @@ -519,12 +495,6 @@ func createInvalidDiscardersCache() 
map[invalidDiscarderEntry]bool { return invalidDiscarders } -// PidDiscarderDump describes a dump of a pid discarder -type PidDiscarderDump struct { - Index int `yaml:"index"` - PidDiscarderParams `yaml:"value"` -} - // InodeDiscarderDump describes a dump of an inode discarder type InodeDiscarderDump struct { Index int `yaml:"index"` @@ -538,41 +508,9 @@ type InodeDiscarderDump struct { type DiscardersDump struct { Date time.Time `yaml:"date"` Inodes []InodeDiscarderDump `yaml:"inodes"` - Pids []PidDiscarderDump `yaml:"pids"` Stats map[string]discarder.Stats `yaml:"stats"` } -func dumpPidDiscarders(pidMap *ebpf.Map) ([]PidDiscarderDump, error) { - var dumps []PidDiscarderDump - - info, err := pidMap.Info() - if err != nil { - return nil, fmt.Errorf("could not get info about pid discarders: %w", err) - } - - var ( - count int - pid uint32 - pidParams PidDiscarderParams - ) - - for entries := pidMap.Iterate(); entries.Next(&pid, &pidParams); { - record := PidDiscarderDump{ - Index: count, - PidDiscarderParams: pidParams, - } - - dumps = append(dumps, record) - - count++ - if count == int(info.MaxEntries) { - break - } - } - - return dumps, nil -} - func dumpInodeDiscarders(resolver *dentry.Resolver, inodeMap *ebpf.Map) ([]InodeDiscarderDump, error) { var dumps []InodeDiscarderDump @@ -646,19 +584,13 @@ func dumpDiscarderStats(buffers ...*ebpf.Map) (map[string]discarder.Stats, error } // DumpDiscarders removes all the discarders -func dumpDiscarders(resolver *dentry.Resolver, pidMap, inodeMap, statsFB, statsBB *ebpf.Map) (DiscardersDump, error) { +func dumpDiscarders(resolver *dentry.Resolver, inodeMap, statsFB, statsBB *ebpf.Map) (DiscardersDump, error) { seclog.Debugf("Dumping discarders") dump := DiscardersDump{ Date: time.Now(), } - pids, err := dumpPidDiscarders(pidMap) - if err != nil { - return dump, err - } - dump.Pids = pids - inodes, err := dumpInodeDiscarders(resolver, inodeMap) if err != nil { return dump, err diff --git a/pkg/security/probe/erpc/erpc.go 
b/pkg/security/probe/erpc/erpc.go index f7fc15b2b301c..9a7e71052becd 100644 --- a/pkg/security/probe/erpc/erpc.go +++ b/pkg/security/probe/erpc/erpc.go @@ -25,7 +25,7 @@ const ( const ( // DiscardInodeOp discards an inode DiscardInodeOp = iota + 1 - // DiscardPidOp discards a pid + // DiscardPidOp discards a pid (DEPRECATED) DiscardPidOp // ResolveSegmentOp resolves the requested segment (DEPRECATED) ResolveSegmentOp @@ -39,7 +39,7 @@ const ( ExpireInodeDiscarderOp // ExpirePidDiscarderOp is used to expire a pid discarder ExpirePidDiscarderOp - // BumpDiscardersRevision is used to bump the discarders revision + // BumpDiscardersRevision is used to bump the discarders revision (DEPRECATED) BumpDiscardersRevision // GetRingbufUsage is used to retrieve the ring buffer usage GetRingbufUsage diff --git a/pkg/security/probe/kfilters/approvers.go b/pkg/security/probe/kfilters/approvers.go index 7c273d2b7acd0..4da7b1cfb93a1 100644 --- a/pkg/security/probe/kfilters/approvers.go +++ b/pkg/security/probe/kfilters/approvers.go @@ -26,9 +26,8 @@ type kfiltersGetter func(approvers rules.Approvers) (ActiveKFilters, error) var KFilterGetters = make(map[eval.EventType]kfiltersGetter) func newBasenameKFilter(tableName string, eventType model.EventType, basename string) (activeKFilter, error) { - return &mapEventMask{ + return &eventMaskEntry{ tableName: tableName, - key: basename, tableKey: ebpf.NewStringMapItem(basename, BasenameFilterSize), eventMask: uint64(1 << (eventType - 1)), }, nil diff --git a/pkg/security/probe/kfilters/approvers_test.go b/pkg/security/probe/kfilters/approvers_test.go index a7c65e48c2731..b0d867f88f14b 100644 --- a/pkg/security/probe/kfilters/approvers_test.go +++ b/pkg/security/probe/kfilters/approvers_test.go @@ -33,7 +33,7 @@ func TestApproverAncestors1(t *testing.T) { t.Fatal("no capabilities for open") } - approvers, err := rs.GetEventTypeApprovers("open", capabilities.GetFieldCapabilities()) + approvers, err := rs.GetEventTypeApprovers("open", 
capabilities) if err != nil { t.Fatal(err) } @@ -54,7 +54,7 @@ func TestApproverAncestors2(t *testing.T) { if !exists { t.Fatal("no capabilities for open") } - approvers, err := rs.GetEventTypeApprovers("open", capabilities.GetFieldCapabilities()) + approvers, err := rs.GetEventTypeApprovers("open", capabilities) if err != nil { t.Fatal(err) } @@ -74,7 +74,7 @@ func TestApproverGlob(t *testing.T) { if !exists { t.Fatal("no capabilities for open") } - approvers, err := rs.GetEventTypeApprovers("open", capabilities.GetFieldCapabilities()) + approvers, err := rs.GetEventTypeApprovers("open", capabilities) if err != nil { t.Fatal(err) } diff --git a/pkg/security/probe/kfilters/bpf.go b/pkg/security/probe/kfilters/bpf.go index d4bc85cabc1dc..a157e9c99c7fa 100644 --- a/pkg/security/probe/kfilters/bpf.go +++ b/pkg/security/probe/kfilters/bpf.go @@ -15,9 +15,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var bpfCapabilities = Capabilities{ - "bpf.cmd": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, +var bpfCapabilities = rules.FieldCapabilities{ + { + Field: "bpf.cmd", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, } diff --git a/pkg/security/probe/kfilters/capabilities.go b/pkg/security/probe/kfilters/capabilities.go index d64a5e19c08a6..b7a388a446ff8 100644 --- a/pkg/security/probe/kfilters/capabilities.go +++ b/pkg/security/probe/kfilters/capabilities.go @@ -12,39 +12,9 @@ import ( ) // allCapabilities hold all the supported filtering capabilities -var allCapabilities = make(map[eval.EventType]Capabilities) - -// Capability represents the type of values we are able to filter kernel side -type Capability struct { - ValueTypeBitmask eval.FieldValueType - ValidateFnc func(value rules.FilterValue) bool - FilterWeight int -} - -// Capabilities represents the filtering capabilities for a set of fields -type Capabilities map[eval.Field]Capability - -// GetFieldCapabilities returns the field capabilities for a 
set of capabilities -func (caps Capabilities) GetFieldCapabilities() rules.FieldCapabilities { - var fcs rules.FieldCapabilities - - for field, cap := range caps { - fcs = append(fcs, rules.FieldCapability{ - Field: field, - TypeBitmask: cap.ValueTypeBitmask, - ValidateFnc: cap.ValidateFnc, - FilterWeight: cap.FilterWeight, - }) - } - - return fcs -} +var allCapabilities = make(map[eval.EventType]rules.FieldCapabilities) // GetCapababilities returns all the filtering capabilities func GetCapababilities() map[eval.EventType]rules.FieldCapabilities { - capabilities := make(map[eval.EventType]rules.FieldCapabilities) - for eventType, eventCapabilities := range allCapabilities { - capabilities[eventType] = eventCapabilities.GetFieldCapabilities() - } - return capabilities + return allCapabilities } diff --git a/pkg/security/probe/kfilters/capabilities_linux.go b/pkg/security/probe/kfilters/capabilities_linux.go index 6fc6d294193da..187273f6066ce 100644 --- a/pkg/security/probe/kfilters/capabilities_linux.go +++ b/pkg/security/probe/kfilters/capabilities_linux.go @@ -29,33 +29,39 @@ func validateBasenameFilter(value rules.FilterValue) bool { return false } -func oneBasenameCapabilities(event string) Capabilities { - return Capabilities{ - event + ".file.path": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, +func oneBasenameCapabilities(event string) rules.FieldCapabilities { + return rules.FieldCapabilities{ + { + Field: event + ".file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, }, - event + ".file.name": { - ValueTypeBitmask: eval.ScalarValueType, + { + Field: event + ".file.name", + TypeBitmask: eval.ScalarValueType, }, } } -func twoBasenameCapabilities(event string, field1, field2 string) Capabilities { - return Capabilities{ - event + "." 
+ field1 + ".path": { - ValueTypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, +func twoBasenameCapabilities(event string, field1, field2 string) rules.FieldCapabilities { + return rules.FieldCapabilities{ + { + Field: event + "." + field1 + ".path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, }, - event + "." + field1 + ".name": { - ValueTypeBitmask: eval.ScalarValueType, + { + Field: event + "." + field1 + ".name", + TypeBitmask: eval.ScalarValueType, }, - event + "." + field2 + ".path": { - ValueTypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, + { + Field: event + "." + field2 + ".path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, }, - event + "." + field2 + ".name": { - ValueTypeBitmask: eval.ScalarValueType, + { + Field: event + "." + field2 + ".name", + TypeBitmask: eval.ScalarValueType, }, } } diff --git a/pkg/security/probe/kfilters/capabilities_windows.go b/pkg/security/probe/kfilters/capabilities_windows.go index 7d1fcf91e555b..1a6bb057412c8 100644 --- a/pkg/security/probe/kfilters/capabilities_windows.go +++ b/pkg/security/probe/kfilters/capabilities_windows.go @@ -7,27 +7,32 @@ package kfilters import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) func init() { - allCapabilities["create"] = Capabilities{ - "create.file.name": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, + allCapabilities["create"] = rules.FieldCapabilities{ + { + Field: "create.file.name", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, }, } - allCapabilities["rename"] = Capabilities{ - "rename.file.name": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, + allCapabilities["rename"] = rules.FieldCapabilities{ + { + Field: "rename.file.name", + TypeBitmask: 
eval.ScalarValueType | eval.PatternValueType, }, } - allCapabilities["delete"] = Capabilities{ - "delete.file.name": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, + allCapabilities["delete"] = rules.FieldCapabilities{ + { + Field: "delete.file.name", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, }, } - allCapabilities["write"] = Capabilities{ - "write.file.name": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, + allCapabilities["write"] = rules.FieldCapabilities{ + { + Field: "write.file.name", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, }, } } diff --git a/pkg/security/probe/kfilters/kfilters.go b/pkg/security/probe/kfilters/kfilters.go index 146c122ed0e3a..596d0fbd06772 100644 --- a/pkg/security/probe/kfilters/kfilters.go +++ b/pkg/security/probe/kfilters/kfilters.go @@ -9,11 +9,10 @@ package kfilters // FilterPolicy describes a filtering policy type FilterPolicy struct { - Mode PolicyMode - Flags PolicyFlag + Mode PolicyMode } // Bytes returns the binary representation of a FilterPolicy func (f *FilterPolicy) Bytes() ([]byte, error) { - return []byte{uint8(f.Mode), uint8(f.Flags)}, nil + return []byte{uint8(f.Mode)}, nil } diff --git a/pkg/security/probe/kfilters/kfilters_bpf.go b/pkg/security/probe/kfilters/kfilters_bpf.go index c58f04f5de1b1..da8a0c4251c3b 100644 --- a/pkg/security/probe/kfilters/kfilters_bpf.go +++ b/pkg/security/probe/kfilters/kfilters_bpf.go @@ -9,6 +9,9 @@ package kfilters import ( + "encoding" + "encoding/hex" + "github.com/DataDog/datadog-agent/pkg/security/probe/managerhelper" manager "github.com/DataDog/ebpf-manager" ) @@ -58,7 +61,7 @@ func (ak ActiveKFilters) Remove(a activeKFilter) { delete(ak, a.Key()) } -type mapHash struct { +type entryKey struct { tableName string key interface{} } @@ -71,7 +74,7 @@ type arrayEntry struct { } func (e *arrayEntry) Key() interface{} { - return mapHash{ + return entryKey{ tableName: e.tableName, key: e.index, } @@ -97,25 
+100,34 @@ func (e *arrayEntry) Apply(manager *manager.Manager) error { return table.Put(e.index, e.value) } -type mapEventMask struct { +type eventMaskEntry struct { tableName string tableKey interface{} - key interface{} eventMask uint64 } -func (e *mapEventMask) Key() interface{} { - return mapHash{ +func (e *eventMaskEntry) Key() interface{} { + mb, ok := e.tableKey.(encoding.BinaryMarshaler) + if !ok { + return entryKey{ + tableName: e.tableName, + key: e.tableKey, + } + } + + data, _ := mb.MarshalBinary() + + return entryKey{ tableName: e.tableName, - key: e.key, + key: hex.EncodeToString(data), } } -func (e *mapEventMask) GetTableName() string { +func (e *eventMaskEntry) GetTableName() string { return e.tableName } -func (e *mapEventMask) Remove(manager *manager.Manager) error { +func (e *eventMaskEntry) Remove(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { return err @@ -130,7 +142,7 @@ func (e *mapEventMask) Remove(manager *manager.Manager) error { return table.Put(e.tableKey, eventMask) } -func (e *mapEventMask) Apply(manager *manager.Manager) error { +func (e *eventMaskEntry) Apply(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { return err diff --git a/pkg/security/probe/kfilters/mmap.go b/pkg/security/probe/kfilters/mmap.go index 1e6a39062b614..5292379e4c103 100644 --- a/pkg/security/probe/kfilters/mmap.go +++ b/pkg/security/probe/kfilters/mmap.go @@ -16,19 +16,23 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var mmapCapabilities = Capabilities{ - "mmap.file.path": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, - ValidateFnc: validateBasenameFilter, +var mmapCapabilities = rules.FieldCapabilities{ + { + Field: "mmap.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, + ValidateFnc: validateBasenameFilter, }, - "mmap.file.name": { - ValueTypeBitmask: eval.ScalarValueType, + 
{ + Field: "mmap.file.name", + TypeBitmask: eval.ScalarValueType, }, - "mmap.protection": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "mmap.protection", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, - "mmap.flags": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "mmap.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, } diff --git a/pkg/security/probe/kfilters/mprotect.go b/pkg/security/probe/kfilters/mprotect.go index 2049ae4a6b074..88b961dd048ee 100644 --- a/pkg/security/probe/kfilters/mprotect.go +++ b/pkg/security/probe/kfilters/mprotect.go @@ -15,12 +15,14 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var mprotectCapabilities = Capabilities{ - "mprotect.req_protection": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, +var mprotectCapabilities = rules.FieldCapabilities{ + { + Field: "mprotect.req_protection", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, - "mprotect.vm_protection": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "mprotect.vm_protection", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, } diff --git a/pkg/security/probe/kfilters/open.go b/pkg/security/probe/kfilters/open.go index 7b7c269ad312b..c0d14c8bbdac1 100644 --- a/pkg/security/probe/kfilters/open.go +++ b/pkg/security/probe/kfilters/open.go @@ -16,18 +16,21 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var openCapabilities = Capabilities{ - "open.file.path": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - FilterWeight: 15, +var openCapabilities = rules.FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, - "open.file.name": { - ValueTypeBitmask: eval.ScalarValueType, - FilterWeight: 10, + { + Field: 
"open.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, + FilterWeight: 15, }, - "open.flags": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "open.file.name", + TypeBitmask: eval.ScalarValueType, + FilterWeight: 10, }, } diff --git a/pkg/security/probe/kfilters/policy.go b/pkg/security/probe/kfilters/policy.go index 117a3a074c835..f9c87b06dae08 100644 --- a/pkg/security/probe/kfilters/policy.go +++ b/pkg/security/probe/kfilters/policy.go @@ -14,21 +14,11 @@ import ( // PolicyMode represents the policy mode (accept or deny) type PolicyMode uint8 -// PolicyFlag is a bitmask of the active filtering policies -type PolicyFlag uint8 - // Policy modes const ( PolicyModeNoFilter PolicyMode = iota PolicyModeAccept PolicyModeDeny -) - -// Policy flags -const ( - PolicyFlagBasename PolicyFlag = 1 - PolicyFlagFlags PolicyFlag = 2 - PolicyFlagMode PolicyFlag = 4 // need to be aligned with the kernel size BasenameFilterSize = 256 @@ -55,26 +45,3 @@ func (m PolicyMode) MarshalJSON() ([]byte, error) { return json.Marshal(s) } - -// MarshalJSON returns the JSON encoding of the policy flags -func (f PolicyFlag) MarshalJSON() ([]byte, error) { - flags := f.StringArray() - - return json.Marshal(flags) -} - -// StringArray returns the policy flags as a string array -func (f PolicyFlag) StringArray() []string { - var flags []string - if f&PolicyFlagBasename != 0 { - flags = append(flags, "basename") - } - if f&PolicyFlagFlags != 0 { - flags = append(flags, "flags") - } - if f&PolicyFlagMode != 0 { - flags = append(flags, "mode") - } - - return flags -} diff --git a/pkg/security/probe/kfilters/report.go b/pkg/security/probe/kfilters/report.go index fa0e30330a818..e5e56e3f4ea16 100644 --- a/pkg/security/probe/kfilters/report.go +++ b/pkg/security/probe/kfilters/report.go @@ -7,8 +7,6 @@ package kfilters import ( - "math" - 
"github.com/DataDog/datadog-agent/pkg/security/probe/config" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -17,7 +15,6 @@ import ( // PolicyReport describes the result of the kernel policy and the approvers for an event type type PolicyReport struct { Mode PolicyMode - Flags PolicyFlag Approvers rules.Approvers } @@ -38,7 +35,7 @@ func NewApplyRuleSetReport(config *config.Config, rs *rules.RuleSet) (*ApplyRule } for _, eventType := range rs.GetEventTypes() { - report := &PolicyReport{Mode: PolicyModeDeny, Flags: math.MaxUint8} + report := &PolicyReport{Mode: PolicyModeDeny} policies[eventType] = report if !config.EnableKernelFilters { diff --git a/pkg/security/probe/kfilters/splice.go b/pkg/security/probe/kfilters/splice.go index 50091dc64e815..42ca2ca385326 100644 --- a/pkg/security/probe/kfilters/splice.go +++ b/pkg/security/probe/kfilters/splice.go @@ -16,19 +16,23 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var spliceCapabilities = Capabilities{ - "splice.file.path": { - ValueTypeBitmask: eval.ScalarValueType | eval.PatternValueType, - ValidateFnc: validateBasenameFilter, +var spliceCapabilities = rules.FieldCapabilities{ + { + Field: "splice.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, + ValidateFnc: validateBasenameFilter, }, - "splice.file.name": { - ValueTypeBitmask: eval.ScalarValueType, + { + Field: "splice.file.name", + TypeBitmask: eval.ScalarValueType, }, - "splice.pipe_entry_flag": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "splice.pipe_entry_flag", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, - "splice.pipe_exit_flag": { - ValueTypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + { + Field: "splice.pipe_exit_flag", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, } diff --git a/pkg/security/probe/probe_ebpf.go 
b/pkg/security/probe/probe_ebpf.go index 95c6c93b71ce8..f5737aa88bdbe 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1082,7 +1082,7 @@ func (p *EBPFProbe) OnNewDiscarder(rs *rules.RuleSet, ev *model.Event, field eva } // ApplyFilterPolicy is called when a passing policy for an event type is applied -func (p *EBPFProbe) ApplyFilterPolicy(eventType eval.EventType, mode kfilters.PolicyMode, flags kfilters.PolicyFlag) error { +func (p *EBPFProbe) ApplyFilterPolicy(eventType eval.EventType, mode kfilters.PolicyMode) error { seclog.Infof("Setting in-kernel filter policy to `%s` for `%s`", mode, eventType) table, err := managerhelper.Map(p.Manager, "filter_policy") if err != nil { @@ -1094,10 +1094,7 @@ func (p *EBPFProbe) ApplyFilterPolicy(eventType eval.EventType, mode kfilters.Po return errors.New("unable to parse the eval event type") } - policy := &kfilters.FilterPolicy{ - Mode: mode, - Flags: flags, - } + policy := &kfilters.FilterPolicy{Mode: mode} return table.Put(ebpf.Uint32MapItem(et), policy) } @@ -1315,11 +1312,6 @@ func (p *EBPFProbe) GetDiscarders() (*DiscardersDump, error) { return nil, err } - pidMap, err := managerhelper.Map(p.Manager, "pid_discarders") - if err != nil { - return nil, err - } - statsFB, err := managerhelper.Map(p.Manager, "fb_discarder_stats") if err != nil { return nil, err @@ -1330,7 +1322,7 @@ func (p *EBPFProbe) GetDiscarders() (*DiscardersDump, error) { return nil, err } - dump, err := dumpDiscarders(p.Resolvers.DentryResolver, pidMap, inodeMap, statsFB, statsBB) + dump, err := dumpDiscarders(p.Resolvers.DentryResolver, inodeMap, statsFB, statsBB) if err != nil { return nil, err } @@ -1536,7 +1528,7 @@ func (p *EBPFProbe) applyDefaultFilterPolicies() { mode = kfilters.PolicyModeDeny } - if err := p.ApplyFilterPolicy(eventType.String(), mode, math.MaxUint8); err != nil { + if err := p.ApplyFilterPolicy(eventType.String(), mode); err != nil { seclog.Debugf("unable to apply to filter policy `%s` 
for `%s`", eventType, mode) } } @@ -1567,7 +1559,7 @@ func (p *EBPFProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRepor } for eventType, report := range ars.Policies { - if err := p.ApplyFilterPolicy(eventType, report.Mode, report.Flags); err != nil { + if err := p.ApplyFilterPolicy(eventType, report.Mode); err != nil { return nil, err } if err := p.SetApprovers(eventType, report.Approvers); err != nil { @@ -1639,13 +1631,18 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workload return nil, err } - ctx, cancelFnc := context.WithCancel(context.Background()) - onDemandRate := rate.Limit(math.Inf(1)) if config.RuntimeSecurity.OnDemandRateLimiterEnabled { onDemandRate = MaxOnDemandEventsPerSecond } + processKiller, err := NewProcessKiller(config) + if err != nil { + return nil, err + } + + ctx, cancelFnc := context.WithCancel(context.Background()) + p := &EBPFProbe{ probe: probe, config: config, @@ -1660,7 +1657,7 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workload ctx: ctx, cancelFnc: cancelFnc, newTCNetDevices: make(chan model.NetDevice, 16), - processKiller: NewProcessKiller(), + processKiller: processKiller, onDemandRateLimiter: rate.NewLimiter(onDemandRate, 1), } diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 9303577cfac4a..36e24729bddf7 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -634,6 +634,11 @@ func (p *EBPFLessProbe) zeroEvent() *model.Event { func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry telemetry.Component) (*EBPFLessProbe, error) { opts.normalize() + processKiller, err := NewProcessKiller(config) + if err != nil { + return nil, err + } + ctx, cancelFnc := context.WithCancel(context.Background()) var grpcOpts []grpc.ServerOption @@ -646,7 +651,7 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry ctx: ctx, cancelFnc: 
cancelFnc, clients: make(map[net.Conn]*client), - processKiller: NewProcessKiller(), + processKiller: processKiller, containerContexts: make(map[string]*ebpfless.ContainerContext), } @@ -654,7 +659,6 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry TagsResolver: opts.TagsResolver, } - var err error p.Resolvers, err = resolvers.NewEBPFLessResolvers(config, p.statsdClient, probe.scrubber, resolversOpts, telemetry) if err != nil { return nil, err diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 574ee112f4175..e440f8f5bf66e 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -1160,6 +1160,11 @@ func initializeWindowsProbe(config *config.Config, opts Opts) (*WindowsProbe, er etwNotificationSize := config.RuntimeSecurity.ETWEventsChannelSize log.Infof("Setting ETW channel size to %d", etwNotificationSize) + processKiller, err := NewProcessKiller(config) + if err != nil { + return nil, err + } + ctx, cancelFnc := context.WithCancel(context.Background()) p := &WindowsProbe{ @@ -1188,7 +1193,7 @@ func initializeWindowsProbe(config *config.Config, opts Opts) (*WindowsProbe, er volumeMap: make(map[string]string), - processKiller: NewProcessKiller(), + processKiller: processKiller, blockonchannelsend: bocs, diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 715d9297dec5d..2bb2559e50a10 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -13,6 +13,8 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -28,12 +30,29 @@ const ( type ProcessKiller struct { sync.Mutex - pendingReports []*KillActionReport + 
pendingReports []*KillActionReport + binariesExcluded []*eval.Glob + sourceAllowed []string } // NewProcessKiller returns a new ProcessKiller -func NewProcessKiller() *ProcessKiller { - return &ProcessKiller{} +func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { + p := &ProcessKiller{ + sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, + } + + binaries := append(binariesExcluded, cfg.RuntimeSecurity.EnforcementBinaryExcluded...) + + for _, str := range binaries { + glob, err := eval.NewGlob(str, false, false) + if err != nil { + return nil, err + } + + p.binariesExcluded = append(p.binariesExcluded, glob) + } + + return p, nil } // AddPendingReports add a pending reports @@ -79,8 +98,32 @@ func (p *ProcessKiller) HandleProcessExited(event *model.Event) { }) } +func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) bool { + for i, pid := range pids { + if pid <= 1 || pid == utils.Getpid() { + return false + } + + if slices.ContainsFunc(p.binariesExcluded, func(glob *eval.Glob) bool { + return glob.Matches(paths[i]) + }) { + return false + } + } + return true +} + +func (p *ProcessKiller) isRuleAllowed(rule *rules.Rule) bool { + return slices.Contains(p.sourceAllowed, rule.Policy.Source) +} + // KillAndReport kill and report func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.Rule, ev *model.Event, killFnc func(pid uint32, sig uint32) error) { + if !p.isRuleAllowed(rule) { + log.Warnf("unable to kill, the source is not allowed: %v", rule) + return + } + entry, exists := ev.ResolveProcessCacheEntry() if !exists { return @@ -92,20 +135,22 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R scope = "process" } - pids, err := p.getPids(scope, ev, entry) + pids, paths, err := p.getProcesses(scope, ev, entry) if err != nil { log.Errorf("unable to kill: %s", err) return } + // if one pids is not allowed don't kill anything + if !p.isKillAllowed(pids, paths) { + 
log.Warnf("unable to kill, some processes are protected: %v, %v", pids, paths) + return + } + sig := model.SignalConstants[signal] killedAt := time.Now() for _, pid := range pids { - if pid <= 1 || pid == utils.Getpid() { - continue - } - log.Debugf("requesting signal %s to be sent to %d", signal, pid) if err := killFnc(uint32(pid), uint32(sig)); err != nil { diff --git a/pkg/security/probe/process_killer_linux.go b/pkg/security/probe/process_killer_linux.go index 521a841074ac2..6586399fa8585 100644 --- a/pkg/security/probe/process_killer_linux.go +++ b/pkg/security/probe/process_killer_linux.go @@ -20,6 +20,29 @@ const ( userSpaceKillWithinMillis = 2000 ) +var ( + // list of binaries that can't be killed + binariesExcluded = []string{ + // package / image + "/opt/datadog-agent/bin/agent/agent", + "/opt/datadog-agent/embedded/bin/trace-agent", + "/opt/datadog-agent/embedded/bin/security-agent", + "/opt/datadog-agent/embedded/bin/process-agent", + "/opt/datadog-agent/embedded/bin/system-probe", + "/opt/datadog-agent/embedded/bin/cws-instrumentation", + "/opt/datadog-agent/bin/datadog-cluster-agent", + // installer + "/opt/datadog-packages/datadog-agent/*/bin/agent/agent", + "/opt/datadog-packages/datadog-agent/*/embedded/bin/trace-agent", + "/opt/datadog-packages/datadog-agent/*/embedded/bin/security-agent", + "/opt/datadog-packages/datadog-agent/*/embedded/bin/process-agent", + "/opt/datadog-packages/datadog-agent/*/embedded/bin/system-probe", + "/opt/datadog-packages/datadog-agent/*/embedded/bin/cws-instrumentation", + "/opt/datadog-packages/datadog-agent/*/bin/datadog-cluster-agent", + "/opt/datadog-packages/datadog-installer/*/bin/installer/installer", + } +) + // KillFromUserspace tries to kill from userspace func (p *ProcessKiller) KillFromUserspace(pid uint32, sig uint32, ev *model.Event) error { proc, err := psutil.NewProcess(int32(pid)) @@ -47,13 +70,17 @@ func (p *ProcessKiller) KillFromUserspace(pid uint32, sig uint32, ev *model.Even return 
syscall.Kill(int(pid), syscall.Signal(sig)) } -func (p *ProcessKiller) getPids(scope string, ev *model.Event, entry *model.ProcessCacheEntry) ([]uint32, error) { - var pids []uint32 +func (p *ProcessKiller) getProcesses(scope string, ev *model.Event, entry *model.ProcessCacheEntry) ([]uint32, []string, error) { + var ( + pids []uint32 + paths []string + ) if entry.ContainerID != "" && scope == "container" { - pids = entry.GetContainerPIDs() + pids, paths = entry.GetContainerPIDs() } else { pids = []uint32{ev.ProcessContext.Pid} + paths = []string{ev.ProcessContext.FileEvent.PathnameStr} } - return pids, nil + return pids, paths, nil } diff --git a/pkg/security/probe/process_killer_test.go b/pkg/security/probe/process_killer_test.go new file mode 100644 index 0000000000000..d9cb203df81cd --- /dev/null +++ b/pkg/security/probe/process_killer_test.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package probe holds probe related files +package probe + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +func TestProcessKillerExclusion(t *testing.T) { + p, err := NewProcessKiller( + &config.Config{ + RuntimeSecurity: &config.RuntimeSecurityConfig{ + EnforcementBinaryExcluded: []string{ + "/usr/bin/dd", + "/usr/sbin/*", + "/var/lib/*/runc", + }, + }, + }, + ) + + assert.Nil(t, err) + assert.True(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/bin/date"})) + assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/bin/dd"})) + assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/sbin/sudo"})) + assert.False(t, p.isKillAllowed([]uint32{utils.Getpid()}, []string{"/usr/bin/date"})) + assert.False(t, p.isKillAllowed([]uint32{1}, []string{"/usr/bin/date"})) + assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-agent/bin/agent/agent"})) + assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-packages/datadog-agent/v1.0.0/bin/agent/agent"})) +} diff --git a/pkg/security/probe/process_killer_windows.go b/pkg/security/probe/process_killer_windows.go index 25ff962f34136..36517f20404f3 100644 --- a/pkg/security/probe/process_killer_windows.go +++ b/pkg/security/probe/process_killer_windows.go @@ -13,6 +13,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/winutil" ) +var ( + // list of binaries that can't be killed + binariesExcluded = []string{} +) + // KillFromUserspace tries to kill from userspace func (p *ProcessKiller) KillFromUserspace(pid uint32, sig uint32, _ *model.Event) error { if sig != model.SIGKILL { @@ -21,9 +26,9 @@ func (p *ProcessKiller) KillFromUserspace(pid uint32, sig uint32, _ *model.Event return winutil.KillProcess(int(pid), 0) } -func (p *ProcessKiller) 
getPids(scope string, ev *model.Event, _ *model.ProcessCacheEntry) ([]uint32, error) { +func (p *ProcessKiller) getProcesses(scope string, ev *model.Event, _ *model.ProcessCacheEntry) ([]uint32, []string, error) { if scope == "container" { - return nil, errors.New("container scope not supported") + return nil, nil, errors.New("container scope not supported") } - return []uint32{ev.ProcessContext.Pid}, nil + return []uint32{ev.ProcessContext.Pid}, []string{ev.ProcessContext.FileEvent.PathnameStr}, nil } diff --git a/pkg/security/proto/api/api.pb.go b/pkg/security/proto/api/api.pb.go index fb4926df4b9e8..def5bb8076c25 100644 --- a/pkg/security/proto/api/api.pb.go +++ b/pkg/security/proto/api/api.pb.go @@ -486,9 +486,9 @@ type EventTypePolicy struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` - Mode uint32 `protobuf:"varint,2,opt,name=Mode,proto3" json:"Mode,omitempty"` - Flags uint32 `protobuf:"varint,3,opt,name=Flags,proto3" json:"Flags,omitempty"` + EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` + Mode uint32 `protobuf:"varint,2,opt,name=Mode,proto3" json:"Mode,omitempty"` + // field 3 is deprecated Approvers *Approvers `protobuf:"bytes,4,opt,name=Approvers,proto3" json:"Approvers,omitempty"` } @@ -538,13 +538,6 @@ func (x *EventTypePolicy) GetMode() uint32 { return 0 } -func (x *EventTypePolicy) GetFlags() uint32 { - if x != nil { - return x.Flags - } - return 0 -} - func (x *EventTypePolicy) GetApprovers() *Approvers { if x != nil { return x.Approvers @@ -3177,444 +3170,443 @@ var file_pkg_security_proto_api_api_proto_rawDesc = []byte{ 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 
0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x87, - 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x41, 0x70, - 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, 0x52, 0x09, 0x41, - 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, 0x22, 0x61, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x3e, 0x0a, 0x0f, 0x41, - 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x70, 0x70, 0x72, 0x6f, - 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x0f, 0x41, - 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x14, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x71, + 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 
0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x4d, + 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x70, 0x70, + 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, 0x52, 0x09, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x73, 0x22, 0x61, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, - 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x4d, 0x0a, 0x14, 0x52, 0x75, + 0x69, 0x65, 0x6c, 0x64, 0x12, 0x3e, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 
0x65, 0x6c, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x75, + 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x22, 0x84, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x4d, 0x0a, 0x14, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x14, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x14, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, - 
0x66, 0x54, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x45, 0x0a, 0x1d, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x4f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x4f, 0x6b, 0x12, 0x14, 0x0a, 0x05, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x56, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x6e, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, - 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x67, 0x0a, - 0x0f, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x24, 0x0a, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x46, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x46, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x49, 0x44, 0x12, 0x16, 
0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x22, 0x63, 0x0a, 0x0c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x27, - 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x6e, - 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x09, - 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x09, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, - 0x12, 0x39, 0x0a, 0x0e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 
0x68, 0x0a, 0x15, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, - 0x12, 0x33, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x41, 0x6e, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xe1, 0x01, 0x0a, 0x11, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x57, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x57, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, - 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x64, - 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x4b, 0x65, 0x72, 0x6e, 0x65, - 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x55, 0x73, 0x65, - 0x4d, 0x6d, 0x61, 0x70, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0f, 0x55, 0x73, 0x65, 0x4d, 0x6d, 0x61, 0x70, 0x61, 0x62, 0x6c, 0x65, 0x4d, - 0x61, 0x70, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x55, 0x73, 0x65, 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, - 0x66, 0x66, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x55, 0x73, 0x65, 0x52, - 0x69, 0x6e, 
0x67, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x75, 0x6d, - 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x22, 0x3b, 0x0a, 0x15, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, - 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x44, 0x75, - 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa8, - 0x02, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, - 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, - 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 
0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x3a, 0x0a, - 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb3, 0x01, 0x0a, 0x12, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, - 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, - 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, - 0xcf, 0x03, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 
0x41, 0x67, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x2c, 0x0a, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, - 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, - 0x04, 0x41, 0x72, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, - 0x68, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x41, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, - 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x54, 0x69, 0x6d, 0x65, 
0x6f, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x79, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x22, 0xbe, 0x02, 0x0a, - 0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, - 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, - 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x18, 0x0a, - 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x44, 0x75, 0x6d, - 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4e, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, - 
0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7b, 0x0a, 0x18, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, - 0x65, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x13, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x45, 0x0a, 0x1d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 
0x0e, 0x0a, 0x02, 0x4f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x02, 0x4f, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x56, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, + 0x6e, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x67, 0x0a, 0x0f, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, + 0x73, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x4c, 0x61, 0x73, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x61, 0x69, 0x6c, 0x73, 0x22, + 0x4a, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, + 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x16, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 
0x63, 0x0a, 0x0c, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0xb1, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x45, + 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x09, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x09, + 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x22, 0x68, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x46, 
0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x33, 0x0a, 0x06, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x6e, 0x64, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xe1, + 0x01, 0x0a, 0x11, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x38, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x74, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x4b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x64, 0x6f, + 0x77, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x4d, 0x6d, 0x61, 0x70, 0x61, 0x62, 0x6c, + 0x65, 0x4d, 0x61, 0x70, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x55, 0x73, 0x65, + 0x4d, 0x6d, 0x61, 0x70, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x73, 0x12, 0x24, 0x0a, 0x0d, + 0x55, 0x73, 0x65, 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x55, 0x73, 0x65, 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, + 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x3b, 0x0a, 0x15, 0x44, 0x75, + 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 
0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x46, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa8, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x52, 0x65, 0x6d, 0x6f, 
0x74, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xb3, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, + 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, + 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, + 0x73, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x67, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, - 0x1a, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x19, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x44, 0x75, 0x6d, 0x70, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x52, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x3f, 0x0a, 0x17, 0x57, 0x6f, - 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x61, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x61, 0x67, 0x22, 0x87, 0x01, 0x0a, 0x1b, - 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 
0x12, 0x0a, 0x04, 0x54, 0x61, - 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x22, 0xec, - 0x01, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, - 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x46, 0x69, 0x6c, - 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, - 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, - 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x41, 0x70, - 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x6e, 0x0a, - 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, - 0x79, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x6c, 0x61, - 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, - 0x13, 
0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x9b, 0x02, - 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x72, - 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, - 0x65, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, - 0x65, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x1a, 0x58, 0x0a, 0x13, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0xcf, 0x03, 0x0a, 0x0f, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, + 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x4c, 0x69, 0x6e, 0x75, + 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, + 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x04, 0x43, 0x6f, 0x6d, 
0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x12, 0x20, + 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x79, 0x0a, 0x15, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x46, 0x69, 0x6c, 0x65, 0x22, 0xbe, 0x02, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, + 0x74, 0x12, 0x16, 
0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, + 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0x5f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 
0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x44, + 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x4e, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x22, 0x2f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x7b, 0x0a, 0x18, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, + 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 
0x22, + 0x67, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x19, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, + 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, + 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, + 0x61, 0x74, 0x61, 0x22, 0x3f, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x54, 0x61, 0x67, 0x22, 0x87, 0x01, 0x0a, 0x1b, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, + 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 
0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x2c, 0x0a, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x49, 0x73, 0x53, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, + 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x22, 0xec, 0x01, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x46, 0x69, 0x6c, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x4e, + 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 
0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x2a, 0x0a, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, + 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x6e, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x61, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, + 0x6c, 0x79, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa0, 0x06, 0x0a, 0x16, - 0x53, 
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, - 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x38, - 0x0a, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, - 0x6b, 0x69, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x4c, 0x61, 0x73, 0x74, - 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, - 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, - 0x6c, 0x69, 0x65, 0x73, 0x12, 0x32, 0x0a, 
0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x0b, 0x20, 0x03, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x05, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 
0x66, 0x69, 0x6c, - 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x1a, 0x5e, - 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, - 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x49, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, - 0x6b, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, - 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x55, 0x0a, 0x19, + 0x18, 0x03, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x58, 0x0a, 0x13, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa0, 0x06, 0x0a, 0x16, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x26, 0x0a, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, + 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, + 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, + 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 
0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, + 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x32, 0x0a, + 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 
0x73, + 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, + 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x1a, 0x5e, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 
0x38, 0x01, 0x22, 0x3f, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, 0x6b, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x55, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, + 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, - 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 
0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x32, 0x8a, 0x0a, 0x0a, 0x0e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3f, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x13, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x57, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, - 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x30, 0x0a, 0x09, 0x47, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x0b, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, - 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, - 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1b, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, - 0x4f, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, - 0x12, 0x5b, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 
0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, - 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, - 0x0e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, - 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, - 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, - 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, - 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, - 0x70, 0x73, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, - 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, - 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, - 0x4f, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, - 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 
0x76, 0x69, - 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, - 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, - 0x12, 0x55, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, - 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, - 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, - 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x00, 0x30, 0x01, 0x12, 0x59, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, - 0x0a, 0x13, 0x53, 0x61, 
0x76, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, 0x67, 0x2f, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, - 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x46, 0x69, 0x6c, 0x65, 0x32, 0x8a, 0x0a, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, + 
0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x00, 0x12, 0x30, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, + 0x66, 0x54, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x12, 0x55, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 
0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x14, 0x44, 0x75, 0x6d, + 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, + 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, + 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, + 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x12, 0x43, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x1b, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 
0x6d, 0x70, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x70, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, + 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x12, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, + 0x12, 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, + 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x59, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 
0x65, 0x73, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x13, 0x53, 0x61, 0x76, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1e, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/security/proto/api/api.proto b/pkg/security/proto/api/api.proto index a0a5a9d979b4e..223c1a9d6f903 100644 --- a/pkg/security/proto/api/api.proto +++ b/pkg/security/proto/api/api.proto @@ -46,7 +46,7 @@ message RuleSetReportMessage{ message EventTypePolicy{ string EventType = 1; uint32 Mode = 2; - uint32 Flags = 3; + // field 3 is deprecated Approvers Approvers = 4; } diff --git a/pkg/security/proto/api/api_vtproto.pb.go b/pkg/security/proto/api/api_vtproto.pb.go index ce91615c6f4be..f823e79e6186e 100644 --- a/pkg/security/proto/api/api_vtproto.pb.go +++ b/pkg/security/proto/api/api_vtproto.pb.go @@ -475,11 +475,6 @@ func (m *EventTypePolicy) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.Flags != 0 { - i = protohelpers.EncodeVarint(dAtA, i, 
uint64(m.Flags)) - i-- - dAtA[i] = 0x18 - } if m.Mode != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) i-- @@ -3010,9 +3005,6 @@ func (m *EventTypePolicy) SizeVT() (n int) { if m.Mode != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.Mode)) } - if m.Flags != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Flags)) - } if m.Approvers != nil { l = m.Approvers.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) @@ -4879,25 +4871,6 @@ func (m *EventTypePolicy) UnmarshalVT(dAtA []byte) error { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - m.Flags = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Flags |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Approvers", wireType) diff --git a/pkg/security/proto/api/transform_functions.go b/pkg/security/proto/api/transform_functions.go index 5156a9b7f4527..7c35c680e6cd5 100644 --- a/pkg/security/proto/api/transform_functions.go +++ b/pkg/security/proto/api/transform_functions.go @@ -25,7 +25,6 @@ func (protoRuleSetReport *RuleSetReportMessage) FromProtoToKFiltersRuleSetReport } policies[policy.EventType] = &kfilters.PolicyReport{ Mode: kfilters.PolicyMode(policy.GetMode()), - Flags: kfilters.PolicyFlag(policy.GetFlags()), Approvers: approversToPrint, } } @@ -69,7 +68,6 @@ func FromKFiltersToProtoRuleSetReport(ruleSetReport *kfilters.ApplyRuleSetReport detail := &EventTypePolicy{ EventType: key, Mode: uint32(policyReport.Mode), - Flags: uint32(policyReport.Flags), Approvers: FromKFiltersToProtoApprovers(policyReport.Approvers), } diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go index 7e2169280517b..57dd03cbb3caf 100644 --- 
a/pkg/security/resolvers/tags/resolver.go +++ b/pkg/security/resolvers/tags/resolver.go @@ -13,6 +13,7 @@ import ( taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" + rootconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/security/probe/config" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -71,13 +72,15 @@ func (t *DefaultResolver) Start(ctx context.Context) error { // Resolve returns the tags for the given id func (t *DefaultResolver) Resolve(id string) []string { - tags, _ := t.tagger.Tag("container_id://"+id, types.OrchestratorCardinality) + entityID := types.NewEntityID(types.ContainerID, id) + tags, _ := t.tagger.Tag(entityID.String(), types.OrchestratorCardinality) return tags } // ResolveWithErr returns the tags for the given id func (t *DefaultResolver) ResolveWithErr(id string) ([]string, error) { - return t.tagger.Tag("container_id://"+id, types.OrchestratorCardinality) + entityID := types.NewEntityID(types.ContainerID, id) + return t.tagger.Tag(entityID.String(), types.OrchestratorCardinality) } // GetValue return the tag value for the given id and tag name @@ -93,12 +96,12 @@ func (t *DefaultResolver) Stop() error { // NewResolver returns a new tags resolver func NewResolver(config *config.Config, telemetry telemetry.Component) Resolver { if config.RemoteTaggerEnabled { - options, err := remote.NodeAgentOptionsForSecurityResolvers() + options, err := remote.NodeAgentOptionsForSecurityResolvers(rootconfig.Datadog()) if err != nil { log.Errorf("unable to configure the remote tagger: %s", err) } else { return &DefaultResolver{ - tagger: remote.NewTagger(options, taggerTelemetry.NewStore(telemetry)), + tagger: remote.NewTagger(options, rootconfig.Datadog(), taggerTelemetry.NewStore(telemetry)), } } } diff --git 
a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 59dace20333ec..b4da8135a20ad 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -3,8 +3,8 @@ module github.com/DataDog/datadog-agent/pkg/security/secl go 1.22.0 require ( - github.com/Masterminds/semver/v3 v3.2.1 - github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Masterminds/semver/v3 v3.3.0 + github.com/Masterminds/sprig/v3 v3.3.0 github.com/alecthomas/participle v0.7.1 github.com/davecgh/go-spew v1.1.1 github.com/fatih/structtag v1.2.0 @@ -13,7 +13,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/skydive-project/go-debouncer v1.0.0 - github.com/spf13/cast v1.6.0 + github.com/spf13/cast v1.7.0 github.com/stretchr/testify v1.9.0 golang.org/x/sys v0.24.0 golang.org/x/text v0.17.0 @@ -24,16 +24,16 @@ require ( ) require ( + dario.cat/mergo v1.0.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/sync v0.8.0 // indirect diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index b8e7938fb5c9e..b0fa6b12c6903 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -1,10 +1,11 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod 
h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= github.com/alecthomas/participle v0.7.1/go.mod h1:HfdmEuwvr12HXQN44HPWXR0lHmVolVYe4dyL6lQ3duY= github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= @@ -19,20 +20,16 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -40,10 +37,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure 
v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -52,64 +47,32 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/skydive-project/go-debouncer v1.0.0 h1:cqU19PyN7WXsnSlMTANvnHws6lGcbVOH2aDQzwe6qbk= github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools 
v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/pkg/security/secl/model/consts_map_names_linux.go b/pkg/security/secl/model/consts_map_names_linux.go index e38b0eb0692b5..55a6198fcfa78 100644 --- a/pkg/security/secl/model/consts_map_names_linux.go +++ b/pkg/security/secl/model/consts_map_names_linux.go @@ -57,7 +57,6 @@ var bpfMapNames = []string{ "path_id", "pathnames", "pid_cache", - "pid_discarders", "pid_ignored", "proc_cache", "process_event_g", diff --git a/pkg/security/secl/model/process_cache_entry_unix.go b/pkg/security/secl/model/process_cache_entry_unix.go index 7b30482b3875e..55fb265984c79 100644 --- a/pkg/security/secl/model/process_cache_entry_unix.go +++ b/pkg/security/secl/model/process_cache_entry_unix.go @@ -106,19 +106,23 @@ func (pc *ProcessCacheEntry) Exec(entry *ProcessCacheEntry) { } // GetContainerPIDs return the pids -func (pc *ProcessCacheEntry) GetContainerPIDs() []uint32 { - var pids []uint32 +func (pc *ProcessCacheEntry) GetContainerPIDs() ([]uint32, []string) { + var ( + pids []uint32 + paths []string + ) for pc != nil { if pc.ContainerID == "" { break } pids = append(pids, pc.Pid) + paths = append(paths, 
pc.FileEvent.PathnameStr) pc = pc.Ancestor } - return pids + return pids, paths } // SetParentOfForkChild set the parent of a fork child diff --git a/pkg/security/secl/rules/approvers.go b/pkg/security/secl/rules/approvers.go index 1b6d287f70bce..7e470d55684a7 100644 --- a/pkg/security/secl/rules/approvers.go +++ b/pkg/security/secl/rules/approvers.go @@ -68,6 +68,29 @@ func bitmaskCombinations(bitmasks []int) []int { return result } +// 1. all the rule for a given event type has to have approvers +// with: +// * caps: open.file.name # only able to apply approver for open.file.name, not for open.flags +// ok: +// * open.file.name == "123" && process.uid == 33 +// * open.file.name == "567" && process.gid == 55 +// ko: +// * open.file.name == "123" && process.uid == 33 +// * open.flags == O_RDONLY +// reason: +// * We can let pass only the event for the `open.file.name` of the first rule as the second one has to be evaluated on all the open events. +// +// 2. all the approver values has to be captured and used by the in-kernel filtering mechanism +// ex: +// * open.file.name in ["123", "456"] && process.uid == 33 +// * open.file.name == "567" && process.gid == 55 +// => approver("123", "456", "567") +// +// 3. 
non approver values can co-exists with approver value in the same rule +// ex: +// * open.file.name in ["123", "456"] && open.file.name != "4.*" && open.file.name != "888" +// reason: +// * event will be approved kernel side and will be rejected userspace side func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) (Approvers, error) { approvers := make(Approvers) @@ -82,7 +105,6 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) bestFilterMode FilterMode ) - LOOP: for _, fieldCap := range fieldCaps { field := fieldCap.Field @@ -99,10 +121,6 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) if isAnApprover { filterValues = filterValues.Merge(FilterValue{Field: field, Value: value.Value, Type: value.Type, Mode: fieldCap.FilterMode}) - } else if fieldCap.TypeBitmask&eval.BitmaskValueType == 0 { - // if not a bitmask we need to have all the value as approvers - // basically a list of values ex: in ["test123", "test456"] - continue LOOP } case eval.BitmaskValueType: bitmasks = append(bitmasks, value.Value.(int)) diff --git a/pkg/security/secl/rules/ruleset_test.go b/pkg/security/secl/rules/ruleset_test.go index 83828113767a3..520d2ca426cdc 100644 --- a/pkg/security/secl/rules/ruleset_test.go +++ b/pkg/security/secl/rules/ruleset_test.go @@ -649,6 +649,102 @@ func TestRuleSetApprovers16(t *testing.T) { } } +func TestRuleSetApprovers17(t *testing.T) { + exprs := []string{ + `open.file.path in ["/etc/passwd", "/etc/shadow"] && open.file.path != ~"/var/*"`, + `open.file.path == "/var/lib/httpd"`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) 
+ + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 || len(approvers["open.file.path"]) != 3 { + t.Fatalf("should get an approver for `open.file.path`: %v", approvers) + } +} + +func TestRuleSetApprovers18(t *testing.T) { + exprs := []string{ + `open.file.path in ["/etc/passwd", "/etc/shadow"] && open.file.path != ~"/var/*"`, + `open.flags == O_RDONLY`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 2 || len(approvers["open.file.path"]) != 2 || len(approvers["open.flags"]) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers19(t *testing.T) { + exprs := []string{ + `open.file.path in ["/etc/passwd", "/etc/shadow"] && open.file.path != ~"/var/*"`, + `open.flags == O_RDONLY`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 0 { + t.Fatal("shouldn't get an approver") + } +} + +func TestRuleSetApprovers20(t *testing.T) { + exprs := []string{ + `open.file.path in ["/etc/passwd", "/etc/shadow"] && open.file.path != ~"/var/*"`, + `unlink.file.name == "test"`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) 
+ + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 || len(approvers["open.file.path"]) != 2 { + t.Fatalf("should get approvers: %v", approvers) + } +} + func TestGetRuleEventType(t *testing.T) { t.Run("ok", func(t *testing.T) { rule := eval.NewRule("aaa", `open.file.name == "test"`, &eval.Opts{}) diff --git a/pkg/security/seclwin/model/consts_map_names_linux.go b/pkg/security/seclwin/model/consts_map_names_linux.go index e38b0eb0692b5..55a6198fcfa78 100644 --- a/pkg/security/seclwin/model/consts_map_names_linux.go +++ b/pkg/security/seclwin/model/consts_map_names_linux.go @@ -57,7 +57,6 @@ var bpfMapNames = []string{ "path_id", "pathnames", "pid_cache", - "pid_discarders", "pid_ignored", "proc_cache", "process_event_g", diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go index a574b8d227054..8a9f9b6084ac1 100644 --- a/pkg/security/tests/action_test.go +++ b/pkg/security/tests/action_test.go @@ -19,12 +19,13 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" "github.com/avast/retry-go/v4" "github.com/oliveagle/jsonpath" "github.com/stretchr/testify/assert" + "go.uber.org/atomic" + "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -189,6 +190,62 @@ func TestActionKill(t *testing.T) { }) } +func TestActionKillExcludeBinary(t *testing.T) { + SkipIfNotAvailable(t) + + checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool { + return !kv.SupportBPFSendSignal() && env.IsContainerized() + }) + + 
ruleDefs := []*rules.RuleDefinition{ + { + ID: "kill_action_kill_exclude", + Expression: `exec.file.name == "sleep" && exec.argv in ["1234567"]`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + }, + }, + } + + executable := which(t, "sleep") + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{enforcementExcludeBinary: executable})) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + killed := atomic.NewBool(false) + + err = test.GetEventSent(t, func() error { + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cmd := exec.CommandContext(timeoutCtx, "sleep", "1234567") + _ = cmd.Run() + + killed.Store(true) + }() + + return nil + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*5, "kill_action_kill_exclude") + + if err != nil { + t.Error("should get an event") + } + + if killed.Load() { + t.Error("shouldn't be killed") + } +} + func TestActionKillRuleSpecific(t *testing.T) { SkipIfNotAvailable(t) diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index 63e5bdbd63b9a..f503c3090829e 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -100,6 +100,7 @@ func SkipIfNotAvailable(t *testing.T) { "TestChdir/syscall-context", "TestLoginUID/login-uid-open-test", "TestLoginUID/login-uid-exec-test", + "TestActionKillExcludeBinary", } if disableSeccomp { diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index b50317bef5a10..bee5088cccb82 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -774,6 +774,7 @@ func genTestConfigs(cfgDir string, opts testOpts) (*emconfig.Config, *secconfig. 
"FIMEnabled": opts.enableFIM, // should only be enabled/disabled on windows "NetworkIngressEnabled": opts.networkIngressEnabled, "OnDemandRateLimiterEnabled": !opts.disableOnDemandRateLimiter, + "EnforcementExcludeBinary": opts.enforcementExcludeBinary, }); err != nil { return nil, nil, err } diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index 46568b1074163..13b8b1b608f59 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -118,6 +118,11 @@ runtime_security_config: enabled: {{ .SBOMEnabled }} host: enabled: {{ .HostSBOMEnabled }} + enforcement: + exclude_binaries: + - {{ .EnforcementExcludeBinary }} + rule_source_allowed: + - file activity_dump: enabled: {{ .EnableActivityDump }} syscall_monitor: diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index 1c8477fe3e7a5..1ea52a0344b79 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -63,6 +63,7 @@ type testOpts struct { disableOnDemandRateLimiter bool ebpfLessEnabled bool dontWaitEBPFLessClient bool + enforcementExcludeBinary string } type dynamicTestOpts struct { @@ -137,5 +138,6 @@ func (to testOpts) Equal(opts testOpts) bool { to.preStartCallback == nil && opts.preStartCallback == nil && to.networkIngressEnabled == opts.networkIngressEnabled && to.disableOnDemandRateLimiter == opts.disableOnDemandRateLimiter && - to.ebpfLessEnabled == opts.ebpfLessEnabled + to.ebpfLessEnabled == opts.ebpfLessEnabled && + to.enforcementExcludeBinary == opts.enforcementExcludeBinary } diff --git a/pkg/serverless/logs/scheduler.go b/pkg/serverless/logs/scheduler.go index 6a46d8240fb3a..e09cc1cf00736 100644 --- a/pkg/serverless/logs/scheduler.go +++ b/pkg/serverless/logs/scheduler.go @@ -6,6 +6,7 @@ package logs import ( + "github.com/DataDog/datadog-agent/comp/core/tagger" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" 
"github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" @@ -18,8 +19,8 @@ import ( var logsScheduler *channel.Scheduler // SetupLogAgent sets up the logs agent to handle messages on the given channel. -func SetupLogAgent(logChannel chan *config.ChannelMessage, sourceName string, source string) (logsAgent.ServerlessLogsAgent, error) { - agent := agentimpl.NewServerlessLogsAgent() +func SetupLogAgent(logChannel chan *config.ChannelMessage, sourceName string, source string, tagger tagger.Component) (logsAgent.ServerlessLogsAgent, error) { + agent := agentimpl.NewServerlessLogsAgent(tagger) err := agent.Start() if err != nil { log.Error("Could not start an instance of the Logs Agent:", err) diff --git a/pkg/trace/stats/aggregation_test.go b/pkg/trace/stats/aggregation_test.go index 4dde3a4b4efc9..0e44634c1fff3 100644 --- a/pkg/trace/stats/aggregation_test.go +++ b/pkg/trace/stats/aggregation_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) func TestGetStatusCode(t *testing.T) { @@ -66,14 +67,14 @@ func TestNewAggregation(t *testing.T) { }{ { "nil case, peer tag aggregation disabled", - &pb.Span{Metrics: map[string]float64{measuredKey: 1}}, + &pb.Span{}, nil, Aggregation{}, nil, }, { "nil case, peer tag aggregation enabled", - &pb.Span{Metrics: map[string]float64{measuredKey: 1}}, + &pb.Span{}, []string{"db.instance", "db.system", "peer.service"}, Aggregation{}, nil, @@ -83,7 +84,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "client", "peer.service": "remote-service"}, - Metrics: map[string]float64{measuredKey: 1}, }, nil, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "client"}}, @@ -94,7 +94,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: 
map[string]string{"span.kind": "", "peer.service": "remote-service"}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a"}}, @@ -105,7 +104,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "client", "peer.service": "remote-service"}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "client", PeerTagsHash: peerSvcOnlyHash}}, @@ -116,7 +114,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "producer", "peer.service": "remote-service"}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "producer", PeerTagsHash: peerSvcOnlyHash}}, @@ -127,7 +124,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "consumer", "messaging.destination": "topic-foo", "messaging.system": "kafka"}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "messaging.destination", "messaging.system"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "consumer", PeerTagsHash: 0xf5eeb51fbe7929b4}}, @@ -138,7 +134,6 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "client", "field1": "val1", "peer.service": "remote-service", "db.instance": "i-1234", "db.system": "postgres"}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "client", PeerTagsHash: peerTagsHash}}, @@ -149,7 +144,6 @@ func 
TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "client", "field1": "val1", "peer.service": "", "db.instance": "", "db.system": ""}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "client", PeerTagsHash: 0}}, @@ -160,13 +154,13 @@ func TestNewAggregation(t *testing.T) { &pb.Span{ Service: "a", Meta: map[string]string{"span.kind": "client", "field1": "val1", "peer.service": "remote-service", "db.instance": "", "db.system": ""}, - Metrics: map[string]float64{measuredKey: 1}, }, []string{"db.instance", "db.system", "peer.service"}, Aggregation{BucketsAggregationKey: BucketsAggregationKey{Service: "a", SpanKind: "client", PeerTagsHash: peerSvcOnlyHash}}, []string{"peer.service:remote-service"}, }, } { + traceutil.SetMeasured(tt.in, true) // mark span as measured to ensure we calculate stats on it sc := &SpanConcentrator{} statSpan, _ := sc.NewStatSpanFromPB(tt.in, tt.peerTags) agg := NewAggregationFromSpan(statSpan, "", PayloadAggregationKey{}) @@ -206,24 +200,23 @@ func TestIsRootSpan(t *testing.T) { isTraceRoot pb.Trilean }{ { - &pb.Span{Metrics: map[string]float64{measuredKey: 1}}, + &pb.Span{}, pb.Trilean_TRUE, }, { &pb.Span{ ParentID: 0, - Metrics: map[string]float64{measuredKey: 1}, }, pb.Trilean_TRUE, }, { &pb.Span{ ParentID: 123, - Metrics: map[string]float64{measuredKey: 1}, }, pb.Trilean_FALSE, }, } { + traceutil.SetMeasured(tt.in, true) statSpan, _ := sc.NewStatSpanFromPB(tt.in, nil) agg := NewAggregationFromSpan(statSpan, "", PayloadAggregationKey{}) assert.Equal(t, tt.isTraceRoot, agg.IsTraceRoot) diff --git a/pkg/trace/stats/span_concentrator.go b/pkg/trace/stats/span_concentrator.go index e554e5549f31e..d886b9e7495f7 100644 --- a/pkg/trace/stats/span_concentrator.go +++ b/pkg/trace/stats/span_concentrator.go @@ -13,17 +13,9 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/obfuscate" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) -// topLevelKey is a special metric, it's 1 if the span is top-level, 0 if not. -const topLevelKey = "_top_level" - -// measuredKey is a special metric flag that marks a span for trace metrics calculation. -const measuredKey = "_dd.measured" - -// partialVersionKey is a metric carrying the snapshot seq number in the case the span is a partial snapshot -const partialVersionKey = "_dd.partial_version" - // SpanConcentratorConfig exposes configuration options for a SpanConcentrator type SpanConcentratorConfig struct { // ComputeStatsBySpanKind enables/disables the computing of stats based on a span's `span.kind` field @@ -140,12 +132,12 @@ func (sc *SpanConcentrator) NewStatSpan( if metrics == nil { metrics = make(map[string]float64) } - partialVersion, hasPartialVersion := metrics[partialVersionKey] eligibleSpanKind := sc.computeStatsBySpanKind && computeStatsForSpanKind(meta["span.kind"]) - if !(metrics[topLevelKey] == 1 || metrics[measuredKey] == 1 || eligibleSpanKind) { + isTopLevel := traceutil.HasTopLevelMetrics(metrics) + if !(isTopLevel || traceutil.IsMeasuredMetrics(metrics) || eligibleSpanKind) { return nil, false } - if hasPartialVersion && partialVersion >= 0 { + if traceutil.IsPartialSnapshotMetrics(metrics) { return nil, false } return &StatSpan{ @@ -159,7 +151,7 @@ func (sc *SpanConcentrator) NewStatSpan( duration: duration, spanKind: meta[tagSpanKind], statusCode: getStatusCode(meta, metrics), - isTopLevel: metrics[topLevelKey] == 1, + isTopLevel: isTopLevel, matchingPeerTags: matchingPeerTags(meta, peerTags), }, true } diff --git a/pkg/trace/stats/statsraw_test.go b/pkg/trace/stats/statsraw_test.go index 6d7888d4ac084..0147d51f41568 100644 --- a/pkg/trace/stats/statsraw_test.go +++ b/pkg/trace/stats/statsraw_test.go @@ -42,7 +42,7 @@ func 
TestGrainWithPeerTags(t *testing.T) { sc := &SpanConcentrator{} t.Run("none present", func(t *testing.T) { assert := assert.New(t) - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, map[string]string{"span.kind": "client"}, map[string]float64{measuredKey: 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, map[string]string{"span.kind": "client"}, map[string]float64{"_dd.measured": 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) aggr := NewAggregationFromSpan(s, "", PayloadAggregationKey{ Env: "default", Hostname: "default", @@ -85,7 +85,7 @@ func TestGrainWithPeerTags(t *testing.T) { for _, spanKind := range []string{"client", "internal"} { t.Run(spanKind, func(t *testing.T) { assert := assert.New(t) - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, map[string]string{"span.kind": spanKind, "_dd.base_service": "the-real-base", "server.address": "foo"}, map[string]float64{measuredKey: 1}, []string{"_dd.base_service", "server.address"}) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, map[string]string{"span.kind": spanKind, "_dd.base_service": "the-real-base", "server.address": "foo"}, map[string]float64{"_dd.measured": 1}, []string{"_dd.base_service", "server.address"}) if spanKind == "client" { assert.Equal([]string{"_dd.base_service:the-real-base", "server.address:foo"}, s.matchingPeerTags) } else { @@ -97,7 +97,7 @@ func TestGrainWithPeerTags(t *testing.T) { t.Run("partially present", func(t *testing.T) { assert := assert.New(t) meta := map[string]string{"span.kind": "client", "peer.service": "aws-s3", "aws.s3.bucket": "bucket-a"} - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{measuredKey: 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, 
map[string]float64{"_dd.measured": 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) aggr := NewAggregationFromSpan(s, "", PayloadAggregationKey{ Env: "default", @@ -124,7 +124,7 @@ func TestGrainWithPeerTags(t *testing.T) { t.Run("peer ip quantization", func(t *testing.T) { assert := assert.New(t) meta := map[string]string{"span.kind": "client", "server.address": "129.49.218.65"} - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{measuredKey: 1}, []string{"server.address"}) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{"_dd.measured": 1}, []string{"server.address"}) aggr := NewAggregationFromSpan(s, "", PayloadAggregationKey{ Env: "default", @@ -152,7 +152,7 @@ func TestGrainWithPeerTags(t *testing.T) { t.Run("all present", func(t *testing.T) { assert := assert.New(t) meta := map[string]string{"span.kind": "client", "peer.service": "aws-dynamodb", "db.instance": "dynamo.test.us1", "db.system": "dynamodb"} - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{measuredKey: 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{"_dd.measured": 1}, []string{"aws.s3.bucket", "db.instance", "db.system", "peer.service"}) aggr := NewAggregationFromSpan(s, "", PayloadAggregationKey{ Env: "default", @@ -183,7 +183,7 @@ func TestGrainWithSynthetics(t *testing.T) { assert := assert.New(t) sc := &SpanConcentrator{} meta := map[string]string{tagStatusCode: "418"} - s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{measuredKey: 1}, nil) + s, _ := sc.NewStatSpan("thing", "yo", "other", "", 0, 0, 0, 0, meta, map[string]float64{"_dd.measured": 1}, nil) aggr := NewAggregationFromSpan(s, "synthetics-browser", PayloadAggregationKey{ Hostname: "host-id", diff --git a/pkg/trace/traceutil/span.go 
b/pkg/trace/traceutil/span.go index c88917928805a..2b416531d402c 100644 --- a/pkg/trace/traceutil/span.go +++ b/pkg/trace/traceutil/span.go @@ -14,7 +14,8 @@ import ( ) const ( - // This is a special metric, it's 1 if the span is top-level, 0 if not. + // topLevelKey is a special metric, it's 1 if the span is top-level, 0 if not, this is kept for backwards + // compatibility but will eventually be replaced with just using the preferred tracerTopLevelKey topLevelKey = "_top_level" // measuredKey is a special metric flag that marks a span for trace metrics calculation. measuredKey = "_dd.measured" @@ -26,7 +27,12 @@ const ( // HasTopLevel returns true if span is top-level. func HasTopLevel(s *pb.Span) bool { - return s.Metrics[topLevelKey] == 1 + return HasTopLevelMetrics(s.Metrics) +} + +// HasTopLevelMetrics returns true if the provided metrics map indicates the span is top-level. +func HasTopLevelMetrics(metrics map[string]float64) bool { + return metrics[topLevelKey] == 1 || metrics[tracerTopLevelKey] == 1 } // UpdateTracerTopLevel sets _top_level tag on spans flagged by the tracer @@ -38,7 +44,12 @@ func UpdateTracerTopLevel(s *pb.Span) { // IsMeasured returns true if a span should be measured (i.e., it should get trace metrics calculated). func IsMeasured(s *pb.Span) bool { - return s.Metrics[measuredKey] == 1 + return IsMeasuredMetrics(s.Metrics) +} + +// IsMeasuredMetrics returns true if a span should be measured (i.e., it should get trace metrics calculated). +func IsMeasuredMetrics(metrics map[string]float64) bool { + return metrics[measuredKey] == 1 } // IsPartialSnapshot returns true if the span is a partial snapshot. @@ -46,7 +57,15 @@ func IsMeasured(s *pb.Span) bool { // When incomplete, a partial snapshot has a metric _dd.partial_version which is a positive integer. 
// The metric usually increases each time a new version of the same span is sent by the tracer func IsPartialSnapshot(s *pb.Span) bool { - v, ok := s.Metrics[partialVersionKey] + return IsPartialSnapshotMetrics(s.Metrics) +} + +// IsPartialSnapshotMetrics returns true if the span is a partial snapshot. +// These kinds of spans are partial images of long-running spans. +// When incomplete, a partial snapshot has a metric _dd.partial_version which is a positive integer. +// The metric usually increases each time a new version of the same span is sent by the tracer +func IsPartialSnapshotMetrics(metrics map[string]float64) bool { + v, ok := metrics[partialVersionKey] return ok && v >= 0 } diff --git a/pkg/util/containers/entity.go b/pkg/util/containers/entity.go index 693c083053793..fb43f39697e32 100644 --- a/pkg/util/containers/entity.go +++ b/pkg/util/containers/entity.go @@ -27,14 +27,6 @@ func BuildEntityName(runtime, id string) string { return runtime + EntitySeparator + id } -// BuildTaggerEntityName builds a valid tagger entity name for a given cid. 
-func BuildTaggerEntityName(id string) string { - if id == "" { - return "" - } - return ContainerEntityPrefix + id -} - // SplitEntityName returns the prefix and container cid parts of a valid entity name func SplitEntityName(name string) (string, string) { if !IsEntityName(name) { diff --git a/pkg/util/containers/entity_test.go b/pkg/util/containers/entity_test.go index b5f373be27186..3799f9785c787 100644 --- a/pkg/util/containers/entity_test.go +++ b/pkg/util/containers/entity_test.go @@ -10,6 +10,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) func TestBuildEntityName(t *testing.T) { @@ -40,12 +42,12 @@ func TestBuildTaggerEntityName(t *testing.T) { expected string }{ // Empty - {"", ""}, + {"", "container_id://"}, // Empty runtime {"5bef08742407ef", "container_id://5bef08742407ef"}, } { t.Run(fmt.Sprintf("case %d: %s", nb, tc.expected), func(t *testing.T) { - out := BuildTaggerEntityName(tc.cID) + out := types.NewEntityID(types.ContainerID, tc.cID).String() assert.Equal(t, tc.expected, out) }) } diff --git a/pkg/util/containers/env_vars_filter.go b/pkg/util/containers/env_vars_filter.go index 24d52a02c7efb..80c6166005819 100644 --- a/pkg/util/containers/env_vars_filter.go +++ b/pkg/util/containers/env_vars_filter.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( @@ -43,12 +43,12 @@ var ( func EnvVarFilterFromConfig() EnvFilter { envFilterOnce.Do(func() { configEnvVars := make([]string, 0) - dockerEnvs := config.Datadog().GetStringMapString("docker_env_as_tags") + dockerEnvs := pkgconfigsetup.Datadog().GetStringMapString("docker_env_as_tags") for envName := range dockerEnvs { configEnvVars = append(configEnvVars, envName) } - containerEnvs := config.Datadog().GetStringMapString("container_env_as_tags") + containerEnvs := 
pkgconfigsetup.Datadog().GetStringMapString("container_env_as_tags") for envName := range containerEnvs { configEnvVars = append(configEnvVars, envName) } diff --git a/pkg/util/containers/filter.go b/pkg/util/containers/filter.go index a0c389dfdde63..1e2ab52934e77 100644 --- a/pkg/util/containers/filter.go +++ b/pkg/util/containers/filter.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -173,7 +173,7 @@ func GetSharedMetricFilter() (*Filter, error) { // GetPauseContainerFilter returns a filter only excluding pause containers func GetPauseContainerFilter() (*Filter, error) { var excludeList []string - if config.Datadog().GetBool("exclude_pause_container") { + if pkgconfigsetup.Datadog().GetBool("exclude_pause_container") { excludeList = append(excludeList, pauseContainerGCR, pauseContainerOpenshift, @@ -256,21 +256,21 @@ func NewFilter(ft FilterType, includeList, excludeList []string) (*Filter, error func newMetricFilterFromConfig() (*Filter, error) { // We merge `container_include` and `container_include_metrics` as this filter // is used by all core and python checks (so components sending metrics). - includeList := config.Datadog().GetStringSlice("container_include") - excludeList := config.Datadog().GetStringSlice("container_exclude") - includeList = append(includeList, config.Datadog().GetStringSlice("container_include_metrics")...) - excludeList = append(excludeList, config.Datadog().GetStringSlice("container_exclude_metrics")...) + includeList := pkgconfigsetup.Datadog().GetStringSlice("container_include") + excludeList := pkgconfigsetup.Datadog().GetStringSlice("container_exclude") + includeList = append(includeList, pkgconfigsetup.Datadog().GetStringSlice("container_include_metrics")...) 
+ excludeList = append(excludeList, pkgconfigsetup.Datadog().GetStringSlice("container_exclude_metrics")...) if len(includeList) == 0 { // support legacy "ac_include" config - includeList = config.Datadog().GetStringSlice("ac_include") + includeList = pkgconfigsetup.Datadog().GetStringSlice("ac_include") } if len(excludeList) == 0 { // support legacy "ac_exclude" config - excludeList = config.Datadog().GetStringSlice("ac_exclude") + excludeList = pkgconfigsetup.Datadog().GetStringSlice("ac_exclude") } - if config.Datadog().GetBool("exclude_pause_container") { + if pkgconfigsetup.Datadog().GetBool("exclude_pause_container") { excludeList = append(excludeList, pauseContainerGCR, pauseContainerOpenshift, @@ -303,22 +303,22 @@ func NewAutodiscoveryFilter(ft FilterType) (*Filter, error) { excludeList := []string{} switch ft { case GlobalFilter: - includeList = config.Datadog().GetStringSlice("container_include") - excludeList = config.Datadog().GetStringSlice("container_exclude") + includeList = pkgconfigsetup.Datadog().GetStringSlice("container_include") + excludeList = pkgconfigsetup.Datadog().GetStringSlice("container_exclude") if len(includeList) == 0 { // fallback and support legacy "ac_include" config - includeList = config.Datadog().GetStringSlice("ac_include") + includeList = pkgconfigsetup.Datadog().GetStringSlice("ac_include") } if len(excludeList) == 0 { // fallback and support legacy "ac_exclude" config - excludeList = config.Datadog().GetStringSlice("ac_exclude") + excludeList = pkgconfigsetup.Datadog().GetStringSlice("ac_exclude") } case MetricsFilter: - includeList = config.Datadog().GetStringSlice("container_include_metrics") - excludeList = config.Datadog().GetStringSlice("container_exclude_metrics") + includeList = pkgconfigsetup.Datadog().GetStringSlice("container_include_metrics") + excludeList = pkgconfigsetup.Datadog().GetStringSlice("container_exclude_metrics") case LogsFilter: - includeList = 
config.Datadog().GetStringSlice("container_include_logs") - excludeList = config.Datadog().GetStringSlice("container_exclude_logs") + includeList = pkgconfigsetup.Datadog().GetStringSlice("container_include_logs") + excludeList = pkgconfigsetup.Datadog().GetStringSlice("container_exclude_logs") } return NewFilter(ft, includeList, excludeList) } diff --git a/pkg/util/containers/filter_test.go b/pkg/util/containers/filter_test.go index f6b75d5637429..1c1d768a4dfc0 100644 --- a/pkg/util/containers/filter_test.go +++ b/pkg/util/containers/filter_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type ctnDef struct { @@ -418,9 +418,9 @@ func TestIsExcludedByAnnotation(t *testing.T) { } func TestNewMetricFilterFromConfig(t *testing.T) { - config.Datadog().SetDefault("exclude_pause_container", true) - config.Datadog().SetDefault("ac_include", []string{"image:apache.*"}) - config.Datadog().SetDefault("ac_exclude", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("exclude_pause_container", true) + pkgconfigsetup.Datadog().SetDefault("ac_include", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("ac_exclude", []string{"name:dd-.*"}) f, err := newMetricFilterFromConfig() require.NoError(t, err) @@ -431,20 +431,20 @@ func TestNewMetricFilterFromConfig(t *testing.T) { assert.True(t, f.IsExcluded(nil, "dummy", "k8s.gcr.io/pause-amd64:3.1", "")) assert.True(t, f.IsExcluded(nil, "dummy", "rancher/pause-amd64:3.1", "")) - config.Datadog().SetDefault("exclude_pause_container", false) + pkgconfigsetup.Datadog().SetDefault("exclude_pause_container", false) f, err = newMetricFilterFromConfig() require.NoError(t, err) assert.False(t, f.IsExcluded(nil, "dummy", "k8s.gcr.io/pause-amd64:3.1", "")) - config.Datadog().SetDefault("exclude_pause_container", true) - 
config.Datadog().SetDefault("ac_include", []string{}) - config.Datadog().SetDefault("ac_exclude", []string{}) + pkgconfigsetup.Datadog().SetDefault("exclude_pause_container", true) + pkgconfigsetup.Datadog().SetDefault("ac_include", []string{}) + pkgconfigsetup.Datadog().SetDefault("ac_exclude", []string{}) - config.Datadog().SetDefault("exclude_pause_container", false) - config.Datadog().SetDefault("container_include", []string{"image:apache.*"}) - config.Datadog().SetDefault("container_exclude", []string{"name:dd-.*"}) - config.Datadog().SetDefault("container_include_metrics", []string{"image:nginx.*"}) - config.Datadog().SetDefault("container_exclude_metrics", []string{"name:ddmetric-.*"}) + pkgconfigsetup.Datadog().SetDefault("exclude_pause_container", false) + pkgconfigsetup.Datadog().SetDefault("container_include", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("container_include_metrics", []string{"image:nginx.*"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude_metrics", []string{"name:ddmetric-.*"}) f, err = newMetricFilterFromConfig() require.NoError(t, err) @@ -460,8 +460,8 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Global - legacy config - config.Datadog().SetDefault("ac_include", []string{"image:apache.*"}) - config.Datadog().SetDefault("ac_exclude", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("ac_include", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("ac_exclude", []string{"name:dd-.*"}) f, err := NewAutodiscoveryFilter(GlobalFilter) require.NoError(t, err) @@ -474,10 +474,10 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Global - new config - legacy config ignored - config.Datadog().SetDefault("container_include", []string{"image:apache.*"}) - config.Datadog().SetDefault("container_exclude", []string{"name:dd-.*"}) - config.Datadog().SetDefault("ac_include", 
[]string{"image:apache/legacy.*"}) - config.Datadog().SetDefault("ac_exclude", []string{"name:dd/legacy-.*"}) + pkgconfigsetup.Datadog().SetDefault("container_include", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("ac_include", []string{"image:apache/legacy.*"}) + pkgconfigsetup.Datadog().SetDefault("ac_exclude", []string{"name:dd/legacy-.*"}) f, err = NewAutodiscoveryFilter(GlobalFilter) require.NoError(t, err) @@ -491,8 +491,8 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Metrics - config.Datadog().SetDefault("container_include_metrics", []string{"image:apache.*"}) - config.Datadog().SetDefault("container_exclude_metrics", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("container_include_metrics", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude_metrics", []string{"name:dd-.*"}) f, err = NewAutodiscoveryFilter(MetricsFilter) require.NoError(t, err) @@ -505,8 +505,8 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Logs - config.Datadog().SetDefault("container_include_logs", []string{"image:apache.*"}) - config.Datadog().SetDefault("container_exclude_logs", []string{"name:dd-.*"}) + pkgconfigsetup.Datadog().SetDefault("container_include_logs", []string{"image:apache.*"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude_logs", []string{"name:dd-.*"}) f, err = NewAutodiscoveryFilter(LogsFilter) require.NoError(t, err) @@ -519,8 +519,8 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Filter errors - non-duplicate error messages - config.Datadog().SetDefault("container_include", []string{"image:apache.*", "invalid"}) - config.Datadog().SetDefault("container_exclude", []string{"name:dd-.*", "invalid"}) + pkgconfigsetup.Datadog().SetDefault("container_include", []string{"image:apache.*", "invalid"}) + 
pkgconfigsetup.Datadog().SetDefault("container_exclude", []string{"name:dd-.*", "invalid"}) f, err = NewAutodiscoveryFilter(GlobalFilter) require.NoError(t, err) @@ -538,8 +538,8 @@ func TestNewAutodiscoveryFilter(t *testing.T) { resetConfig() // Filter errors - invalid regex - config.Datadog().SetDefault("container_include", []string{"image:apache.*", "kube_namespace:?"}) - config.Datadog().SetDefault("container_exclude", []string{"name:dd-.*", "invalid"}) + pkgconfigsetup.Datadog().SetDefault("container_include", []string{"image:apache.*", "kube_namespace:?"}) + pkgconfigsetup.Datadog().SetDefault("container_exclude", []string{"name:dd-.*", "invalid"}) f, err = NewAutodiscoveryFilter(GlobalFilter) assert.Error(t, err, errors.New("invalid regex '?': error parsing regexp: missing argument to repetition operator: `?`")) @@ -672,13 +672,13 @@ func TestParseFilters(t *testing.T) { } func resetConfig() { - config.Datadog().SetDefault("exclude_pause_container", true) - config.Datadog().SetDefault("container_include", []string{}) - config.Datadog().SetDefault("container_exclude", []string{}) - config.Datadog().SetDefault("container_include_metrics", []string{}) - config.Datadog().SetDefault("container_exclude_metrics", []string{}) - config.Datadog().SetDefault("container_include_logs", []string{}) - config.Datadog().SetDefault("container_exclude_logs", []string{}) - config.Datadog().SetDefault("ac_include", []string{}) - config.Datadog().SetDefault("ac_exclude", []string{}) + pkgconfigsetup.Datadog().SetDefault("exclude_pause_container", true) + pkgconfigsetup.Datadog().SetDefault("container_include", []string{}) + pkgconfigsetup.Datadog().SetDefault("container_exclude", []string{}) + pkgconfigsetup.Datadog().SetDefault("container_include_metrics", []string{}) + pkgconfigsetup.Datadog().SetDefault("container_exclude_metrics", []string{}) + pkgconfigsetup.Datadog().SetDefault("container_include_logs", []string{}) + 
pkgconfigsetup.Datadog().SetDefault("container_exclude_logs", []string{}) + pkgconfigsetup.Datadog().SetDefault("ac_include", []string{}) + pkgconfigsetup.Datadog().SetDefault("ac_exclude", []string{}) } diff --git a/pkg/util/containers/image/go.mod b/pkg/util/containers/image/go.mod new file mode 100644 index 0000000000000..7108f9f3db1a9 --- /dev/null +++ b/pkg/util/containers/image/go.mod @@ -0,0 +1,11 @@ +module github.com/DataDog/datadog-agent/pkg/util/containers/image + +go 1.22.0 + +require github.com/stretchr/testify v1.9.0 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/util/containers/image/go.sum b/pkg/util/containers/image/go.sum new file mode 100644 index 0000000000000..60ce688a04104 --- /dev/null +++ b/pkg/util/containers/image/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/util/containers/image.go b/pkg/util/containers/image/image.go similarity index 94% rename from pkg/util/containers/image.go rename to pkg/util/containers/image/image.go index 
e7cb6a0d20039..eaccc3f174460 100644 --- a/pkg/util/containers/image.go +++ b/pkg/util/containers/image/image.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package containers +// Package image provides utilities to handle container images for pkg/util/containers +package image import ( "errors" diff --git a/pkg/util/containers/image_test.go b/pkg/util/containers/image/image_test.go similarity index 99% rename from pkg/util/containers/image_test.go rename to pkg/util/containers/image/image_test.go index 7574af49675f0..50a6746771f2f 100644 --- a/pkg/util/containers/image_test.go +++ b/pkg/util/containers/image/image_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package containers +package image import ( "fmt" diff --git a/pkg/util/kubernetes/kubelet/conversion.go b/pkg/util/kubernetes/kubelet/conversion.go new file mode 100644 index 0000000000000..4c98c9a9b5e9a --- /dev/null +++ b/pkg/util/kubernetes/kubelet/conversion.go @@ -0,0 +1,348 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubelet && kubeapiserver + +package kubelet + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ConvertKubeletPodToK8sPod converts a Pod to a Kubernetes Pod. +// The Pod in this package is a simplification of the one in the Kubernetes +// library, so the result will not contain all the fields. That's OK, because +// this function is only called from the KSM check, so we only need to convert +// the fields that are used by the check. 
+func ConvertKubeletPodToK8sPod(pod *Pod) *corev1.Pod { + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Metadata.Name, + UID: types.UID(pod.Metadata.UID), + Namespace: pod.Metadata.Namespace, + CreationTimestamp: metav1.Time{ + Time: pod.Metadata.CreationTimestamp, + }, + Annotations: pod.Metadata.Annotations, + Labels: pod.Metadata.Labels, + OwnerReferences: convertToK8sOwnerReferences(pod.Metadata.Owners), + }, + Spec: corev1.PodSpec{ + HostNetwork: pod.Spec.HostNetwork, + NodeName: pod.Spec.NodeName, + InitContainers: convertToK8sContainers(pod.Spec.InitContainers), + Containers: convertToK8sContainers(pod.Spec.Containers), + Volumes: convertToK8sVolumes(pod.Spec.Volumes), + PriorityClassName: pod.Spec.PriorityClassName, + SecurityContext: convertToK8sPodSecurityContext(pod.Spec.SecurityContext), + RuntimeClassName: pod.Spec.RuntimeClassName, + Tolerations: convertToK8sPodTolerations(pod.Spec.Tolerations), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPhase(pod.Status.Phase), + HostIP: pod.Status.HostIP, + PodIP: pod.Status.PodIP, + ContainerStatuses: convertToK8sContainerStatuses(pod.Status.Containers), + InitContainerStatuses: convertToK8sContainerStatuses(pod.Status.InitContainers), + Conditions: convertToK8sConditions(pod.Status.Conditions), + QOSClass: corev1.PodQOSClass(pod.Status.QOSClass), + StartTime: &metav1.Time{ + Time: pod.Status.StartTime, + }, + Reason: pod.Status.Reason, + }, + } +} + +func convertToK8sOwnerReferences(owners []PodOwner) []metav1.OwnerReference { + if owners == nil { + return nil + } + + k8sOwnerReferences := make([]metav1.OwnerReference, len(owners)) + for i, owner := range owners { + k8sOwnerReferences[i] = metav1.OwnerReference{ + Kind: owner.Kind, + Name: owner.Name, + Controller: owner.Controller, + } + } + return k8sOwnerReferences +} + +func convertToK8sContainers(containerSpecs []ContainerSpec) []corev1.Container { + if containerSpecs == nil { + 
return nil + } + + k8sContainers := make([]corev1.Container, len(containerSpecs)) + for i, containerSpec := range containerSpecs { + k8sContainers[i] = corev1.Container{ + Name: containerSpec.Name, + Image: containerSpec.Image, + Ports: convertToK8sContainerPorts(containerSpec.Ports), + ReadinessProbe: convertToK8sProbe(containerSpec.ReadinessProbe), + Env: convertToK8sEnvVars(containerSpec.Env), + SecurityContext: convertToK8sContainerSecurityContext(containerSpec.SecurityContext), + Resources: convertToK8sResourceRequirements(containerSpec.Resources), + } + } + return k8sContainers +} + +func convertToK8sVolumes(volumeSpecs []VolumeSpec) []corev1.Volume { + if volumeSpecs == nil { + return nil + } + + k8sVolumes := make([]corev1.Volume, len(volumeSpecs)) + for i, volumeSpec := range volumeSpecs { + k8sVolumes[i] = corev1.Volume{ + Name: volumeSpec.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: convertToK8sPersistentVolumeClaim(volumeSpec.PersistentVolumeClaim), + Ephemeral: convertToK8sEphemeralVolume(volumeSpec.Ephemeral), + }, + } + } + return k8sVolumes +} + +func convertToK8sPodSecurityContext(podSecurityContextSpec *PodSecurityContextSpec) *corev1.PodSecurityContext { + if podSecurityContextSpec == nil { + return nil + } + + runAsUser := int64(podSecurityContextSpec.RunAsUser) + runAsGroup := int64(podSecurityContextSpec.RunAsGroup) + fsGroup := int64(podSecurityContextSpec.FsGroup) + + return &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &fsGroup, + } +} + +func convertToK8sContainerPorts(containerPortSpecs []ContainerPortSpec) []corev1.ContainerPort { + if containerPortSpecs == nil { + return nil + } + + k8sPorts := make([]corev1.ContainerPort, len(containerPortSpecs)) + for i, containerPortSpec := range containerPortSpecs { + k8sPorts[i] = corev1.ContainerPort{ + ContainerPort: int32(containerPortSpec.ContainerPort), + HostPort: int32(containerPortSpec.HostPort), + Name: 
containerPortSpec.Name, + Protocol: corev1.Protocol(containerPortSpec.Protocol), + } + } + return k8sPorts +} + +func convertToK8sProbe(containerProbe *ContainerProbe) *corev1.Probe { + if containerProbe == nil { + return nil + } + return &corev1.Probe{ + InitialDelaySeconds: int32(containerProbe.InitialDelaySeconds), + } +} + +func convertToK8sEnvVars(envVars []EnvVar) []corev1.EnvVar { + if envVars == nil { + return nil + } + + k8sEnvVars := make([]corev1.EnvVar, len(envVars)) + for i, envVar := range envVars { + k8sEnvVars[i] = corev1.EnvVar{ + Name: envVar.Name, + Value: envVar.Value, + } + } + return k8sEnvVars +} + +func convertToK8sContainerSecurityContext(containerSecurityContextSpec *ContainerSecurityContextSpec) *corev1.SecurityContext { + if containerSecurityContextSpec == nil { + return nil + } + return &corev1.SecurityContext{ + Capabilities: convertToK8sCapabilities(containerSecurityContextSpec.Capabilities), + Privileged: containerSecurityContextSpec.Privileged, + SeccompProfile: convertToK8sSeccompProfile(containerSecurityContextSpec.SeccompProfile), + } +} + +func convertToK8sCapabilities(capabilities *CapabilitiesSpec) *corev1.Capabilities { + if capabilities == nil { + return nil + } + + res := &corev1.Capabilities{} + + for _, addCapability := range capabilities.Add { + res.Add = append(res.Add, corev1.Capability(addCapability)) + } + + for _, dropCapability := range capabilities.Drop { + res.Drop = append(res.Drop, corev1.Capability(dropCapability)) + } + + return res +} + +func convertToK8sSeccompProfile(seccompProfileSpec *SeccompProfileSpec) *corev1.SeccompProfile { + if seccompProfileSpec == nil { + return nil + } + return &corev1.SeccompProfile{ + Type: corev1.SeccompProfileType(seccompProfileSpec.Type), + LocalhostProfile: seccompProfileSpec.LocalhostProfile, + } +} + +func convertToK8sResourceRequirements(containerResourcesSpec *ContainerResourcesSpec) corev1.ResourceRequirements { + if containerResourcesSpec == nil { + return 
corev1.ResourceRequirements{} + } + return corev1.ResourceRequirements{ + Requests: convertToK8sResourceList(containerResourcesSpec.Requests), + Limits: convertToK8sResourceList(containerResourcesSpec.Limits), + } +} + +func convertToK8sResourceList(resourceList ResourceList) corev1.ResourceList { + k8sResourceList := make(corev1.ResourceList) + for k, v := range resourceList { + k8sResourceList[corev1.ResourceName(k)] = v + } + return k8sResourceList +} + +func convertToK8sPersistentVolumeClaim(persistentVolumeClaimSpec *PersistentVolumeClaimSpec) *corev1.PersistentVolumeClaimVolumeSource { + if persistentVolumeClaimSpec == nil { + return nil + } + return &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: persistentVolumeClaimSpec.ClaimName, + ReadOnly: persistentVolumeClaimSpec.ReadOnly, + } +} + +func convertToK8sEphemeralVolume(ephemeralSpec *EphemeralSpec) *corev1.EphemeralVolumeSource { + if ephemeralSpec == nil { + return nil + } + return &corev1.EphemeralVolumeSource{ + VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: ephemeralSpec.VolumeClaimTemplate.Metadata.Name, + UID: types.UID(ephemeralSpec.VolumeClaimTemplate.Metadata.UID), + Annotations: ephemeralSpec.VolumeClaimTemplate.Metadata.Annotations, + Labels: ephemeralSpec.VolumeClaimTemplate.Metadata.Labels, + }, + }, + } +} + +func convertToK8sContainerStatuses(containerStatuses []ContainerStatus) []corev1.ContainerStatus { + if containerStatuses == nil { + return nil + } + + k8sStatuses := make([]corev1.ContainerStatus, len(containerStatuses)) + for i, containerStatus := range containerStatuses { + k8sStatuses[i] = corev1.ContainerStatus{ + Name: containerStatus.Name, + Image: containerStatus.Image, + ImageID: containerStatus.ImageID, + ContainerID: containerStatus.ID, + Ready: containerStatus.Ready, + RestartCount: int32(containerStatus.RestartCount), + State: convertToK8sContainerState(containerStatus.State), + LastTerminationState: 
convertToK8sContainerState(containerStatus.LastState), + } + } + return k8sStatuses +} + +func convertToK8sContainerState(containerState ContainerState) corev1.ContainerState { + return corev1.ContainerState{ + Waiting: convertToK8sContainerStateWaiting(containerState.Waiting), + Running: convertToK8sContainerStateRunning(containerState.Running), + Terminated: convertToK8sContainerStateTerminated(containerState.Terminated), + } +} + +func convertToK8sContainerStateWaiting(containerStateWaiting *ContainerStateWaiting) *corev1.ContainerStateWaiting { + if containerStateWaiting == nil { + return nil + } + return &corev1.ContainerStateWaiting{ + Reason: containerStateWaiting.Reason, + } +} + +func convertToK8sContainerStateRunning(containerStateRunning *ContainerStateRunning) *corev1.ContainerStateRunning { + if containerStateRunning == nil { + return nil + } + return &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: containerStateRunning.StartedAt}, + } +} + +func convertToK8sContainerStateTerminated(containerStateTerminated *ContainerStateTerminated) *corev1.ContainerStateTerminated { + if containerStateTerminated == nil { + return nil + } + return &corev1.ContainerStateTerminated{ + ExitCode: containerStateTerminated.ExitCode, + StartedAt: metav1.Time{Time: containerStateTerminated.StartedAt}, + FinishedAt: metav1.Time{Time: containerStateTerminated.FinishedAt}, + Reason: containerStateTerminated.Reason, + } +} + +func convertToK8sConditions(conditions []Conditions) []corev1.PodCondition { + if conditions == nil { + return nil + } + + k8sConditions := make([]corev1.PodCondition, len(conditions)) + for i, condition := range conditions { + k8sConditions[i] = corev1.PodCondition{ + Type: corev1.PodConditionType(condition.Type), + Status: corev1.ConditionStatus(condition.Status), + } + } + return k8sConditions +} + +func convertToK8sPodTolerations(tolerations []Toleration) []corev1.Toleration { + if tolerations == nil { + return nil + } + + k8sTolerations := 
make([]corev1.Toleration, len(tolerations)) + for i, toleration := range tolerations { + k8sTolerations[i] = corev1.Toleration{ + Key: toleration.Key, + Operator: corev1.TolerationOperator(toleration.Operator), + Value: toleration.Value, + Effect: corev1.TaintEffect(toleration.Effect), + TolerationSeconds: toleration.TolerationSeconds, + } + } + return k8sTolerations +} diff --git a/pkg/util/kubernetes/kubelet/conversion_test.go b/pkg/util/kubernetes/kubelet/conversion_test.go new file mode 100644 index 0000000000000..f75ad0aca4976 --- /dev/null +++ b/pkg/util/kubernetes/kubelet/conversion_test.go @@ -0,0 +1,299 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubelet && kubeapiserver + +package kubelet + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" +) + +func TestConvertKubeletPodToK8sPod(t *testing.T) { + now := time.Now() + + pod := &Pod{ + Metadata: PodMetadata{ + Name: "test-pod", + UID: "12345", + Namespace: "default", + CreationTimestamp: now, + Annotations: map[string]string{ + "annotation-key": "annotation-value", + }, + Labels: map[string]string{ + "label-key": "label-value", + }, + Owners: []PodOwner{ + { + Kind: "ReplicaSet", + Name: "rs1", + Controller: ptr.To(true), + }, + }, + }, + Spec: Spec{ + HostNetwork: true, + NodeName: "node1", + InitContainers: []ContainerSpec{ + { + Name: "init-container", + Image: "init-image", + }, + }, + Containers: []ContainerSpec{ + { + Name: "main-container", + Image: "main-image", + Ports: []ContainerPortSpec{ + { + ContainerPort: 7777, + HostPort: 8888, + Name: "port", + Protocol: "TCP", + }, + }, + 
ReadinessProbe: &ContainerProbe{ + InitialDelaySeconds: 10, + }, + Env: []EnvVar{ + { + Name: "SOME_ENV", + Value: "some_env_value", + }, + }, + SecurityContext: &ContainerSecurityContextSpec{ + Capabilities: &CapabilitiesSpec{ + Add: []string{"CAP_SYS_ADMIN"}, + Drop: []string{"CAP_NET_RAW"}, + }, + Privileged: ptr.To(true), + SeccompProfile: &SeccompProfileSpec{ + Type: SeccompProfileTypeRuntimeDefault, + LocalhostProfile: ptr.To("localhost-profile"), + }, + }, + Resources: &ContainerResourcesSpec{ + Requests: ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("200Mi"), + }, + Limits: ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("400Mi"), + }, + }, + }, + }, + Volumes: []VolumeSpec{ + { + Name: "volume1", + PersistentVolumeClaim: &PersistentVolumeClaimSpec{ + ClaimName: "some-claim", + ReadOnly: true, + }, + }, + }, + PriorityClassName: "high-priority", + SecurityContext: &PodSecurityContextSpec{ + RunAsUser: 1000, + RunAsGroup: 2000, + FsGroup: 3000, + }, + RuntimeClassName: ptr.To("runtime-class"), + Tolerations: []Toleration{ + { + Key: "key1", + Operator: "Exists", + Effect: "NoSchedule", + }, + }, + }, + Status: Status{ + Phase: "Running", + HostIP: "192.168.1.1", + PodIP: "10.0.0.1", + Containers: []ContainerStatus{ + { + Name: "main-container", + Image: "main-image", + Ready: true, + State: ContainerState{ + Running: &ContainerStateRunning{ + StartedAt: now, + }, + }, + }, + }, + InitContainers: []ContainerStatus{ + { + Name: "init-container", + Image: "init-image", + Ready: true, + }, + }, + Conditions: []Conditions{ + { + Type: "Ready", + Status: "True", + }, + }, + QOSClass: "BestEffort", + StartTime: now, + Reason: "Started", + }, + } + + expectedPod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + UID: types.UID("12345"), + Namespace: "default", + CreationTimestamp: metav1.NewTime(now), + Annotations: 
map[string]string{ + "annotation-key": "annotation-value", + }, + Labels: map[string]string{ + "label-key": "label-value", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "rs1", + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + NodeName: "node1", + InitContainers: []corev1.Container{ + { + Name: "init-container", + Image: "init-image", + }, + }, + Containers: []corev1.Container{ + { + Name: "main-container", + Image: "main-image", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 7777, + HostPort: 8888, + Name: "port", + Protocol: "TCP", + }, + }, + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 10, + }, + Env: []corev1.EnvVar{ + { + Name: "SOME_ENV", + Value: "some_env_value", + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{"CAP_SYS_ADMIN"}, + Drop: []corev1.Capability{"CAP_NET_RAW"}, + }, + Privileged: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + LocalhostProfile: ptr.To("localhost-profile"), + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("400Mi"), + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "volume1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "some-claim", + ReadOnly: true, + }, + }, + }, + }, + PriorityClassName: "high-priority", + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](1000), + RunAsGroup: ptr.To[int64](2000), + FSGroup: ptr.To[int64](3000), + }, + RuntimeClassName: ptr.To("runtime-class"), + Tolerations: []corev1.Toleration{ + { + Key: "key1", + Operator: "Exists", + Effect: "NoSchedule", + }, + }, + }, + 
Status: corev1.PodStatus{ + Phase: corev1.PodPhase("Running"), + HostIP: "192.168.1.1", + PodIP: "10.0.0.1", + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "main-container", + Image: "main-image", + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{ + Time: now, + }, + }, + }, + }, + }, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container", + Image: "init-image", + Ready: true, + }, + }, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodConditionType("Ready"), + Status: corev1.ConditionStatus("True"), + }, + }, + QOSClass: corev1.PodQOSClass("BestEffort"), + StartTime: &metav1.Time{ + Time: now, + }, + Reason: "Started", + }, + } + + assert.Equal(t, expectedPod, ConvertKubeletPodToK8sPod(pod)) +} diff --git a/pkg/util/kubernetes/kubelet/kubelet_common.go b/pkg/util/kubernetes/kubelet/kubelet_common.go index 814f39d99ea14..e8fe1e94fafec 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_common.go +++ b/pkg/util/kubernetes/kubelet/kubelet_common.go @@ -12,6 +12,7 @@ import ( "fmt" "strings" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/util/containers" ) @@ -41,14 +42,6 @@ func PodUIDToEntityName(uid string) string { return KubePodPrefix + uid } -// PodUIDToTaggerEntityName returns a prefixed tagger entity name from a pod UID -func PodUIDToTaggerEntityName(uid string) string { - if uid == "" { - return "" - } - return KubePodTaggerEntityPrefix + uid -} - // ParseMetricFromRaw parses a metric from raw prometheus text func ParseMetricFromRaw(raw []byte, metric string) (string, error) { bytesReader := bytes.NewReader(raw) @@ -70,7 +63,7 @@ func ParseMetricFromRaw(raw []byte, metric string) (string, error) { func KubeContainerIDToTaggerEntityID(ctrID string) (string, error) { sep := strings.LastIndex(ctrID, containers.EntitySeparator) if sep != -1 && len(ctrID) > sep+len(containers.EntitySeparator) { - return 
containers.ContainerEntityName + ctrID[sep:], nil + return types.NewEntityID(types.ContainerID, ctrID[sep+len(containers.EntitySeparator):]).String(), nil } return "", fmt.Errorf("can't extract an entity ID from container ID %s", ctrID) } @@ -80,7 +73,7 @@ func KubeContainerIDToTaggerEntityID(ctrID string) (string, error) { func KubePodUIDToTaggerEntityID(podUID string) (string, error) { sep := strings.LastIndex(podUID, containers.EntitySeparator) if sep != -1 && len(podUID) > sep+len(containers.EntitySeparator) { - return KubePodTaggerEntityName + podUID[sep:], nil + return types.NewEntityID(types.KubernetesPodUID, podUID[sep+len(containers.EntitySeparator):]).String(), nil } return "", fmt.Errorf("can't extract an entity ID from pod UID %s", podUID) } diff --git a/pkg/util/kubernetes/kubelet/podwatcher_test.go b/pkg/util/kubernetes/kubelet/podwatcher_test.go index 211c280701dfd..43460d34c2aac 100644 --- a/pkg/util/kubernetes/kubelet/podwatcher_test.go +++ b/pkg/util/kubernetes/kubelet/podwatcher_test.go @@ -14,10 +14,11 @@ import ( "testing" "time" - configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) /* diff --git a/pkg/util/kubernetes/kubelet/types_kubelet.go b/pkg/util/kubernetes/kubelet/types_kubelet.go index 1c44882e05b3d..f99c5209f3f03 100644 --- a/pkg/util/kubernetes/kubelet/types_kubelet.go +++ b/pkg/util/kubernetes/kubelet/types_kubelet.go @@ -28,20 +28,22 @@ type PodList struct { // PodMetadata contains fields for unmarshalling a pod's metadata type PodMetadata struct { - Name string `json:"name,omitempty"` - UID string `json:"uid,omitempty"` - Namespace string `json:"namespace,omitempty"` - ResVersion string `json:"resourceVersion,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Owners 
[]PodOwner `json:"ownerReferences,omitempty"` + Name string `json:"name,omitempty"` + UID string `json:"uid,omitempty"` + Namespace string `json:"namespace,omitempty"` + ResVersion string `json:"resourceVersion,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Owners []PodOwner `json:"ownerReferences,omitempty"` + CreationTimestamp time.Time `json:"creationTimestamp,omitempty"` } // PodOwner contains fields for unmarshalling a Pod.Metadata.Owners type PodOwner struct { - Kind string `json:"kind,omitempty"` - Name string `json:"name,omitempty"` - ID string `json:"uid,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"uid,omitempty"` + Controller *bool `json:"controller,omitempty"` } // Spec contains fields for unmarshalling a Pod.Spec @@ -54,6 +56,7 @@ type Spec struct { PriorityClassName string `json:"priorityClassName,omitempty"` SecurityContext *PodSecurityContextSpec `json:"securityContext,omitempty"` RuntimeClassName *string `json:"runtimeClassName,omitempty"` + Tolerations []Toleration `json:"tolerations,omitempty"` } // PodSecurityContextSpec contains fields for unmarshalling a Pod.Spec.SecurityContext @@ -74,6 +77,15 @@ type ContainerSpec struct { Resources *ContainerResourcesSpec `json:"resources,omitempty"` } +// Toleration contains fields for unmarshalling a Pod.Spec.Tolerations +type Toleration struct { + Key string `json:"key,omitempty"` + Operator string `json:"operator,omitempty"` + Value string `json:"value,omitempty"` + Effect string `json:"effect,omitempty"` + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` +} + // ResourceName is the key to fields in in Pod.Spec.Containers.Resources type ResourceName string @@ -155,6 +167,7 @@ type VolumeSpec struct { // PersistentVolumeClaimSpec contains fields for unmarshalling a Pod.Spec.Volumes.PersistentVolumeClaim type PersistentVolumeClaimSpec struct { 
ClaimName string `json:"claimName"` + ReadOnly bool `json:"readOnly,omitempty"` } // EphemeralSpec contains fields for unmarshalling a Pod.Spec.Volumes.Ephemeral @@ -177,6 +190,8 @@ type Status struct { AllContainers []ContainerStatus Conditions []Conditions `json:"conditions,omitempty"` QOSClass string `json:"qosClass,omitempty"` + StartTime time.Time `json:"startTime,omitempty"` + Reason string `json:"reason,omitempty"` } // GetAllContainers returns the list of init and regular containers diff --git a/pkg/util/tagger/tagger.go b/pkg/util/tagger/tagger.go new file mode 100644 index 0000000000000..61615355e9747 --- /dev/null +++ b/pkg/util/tagger/tagger.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package tagger provides function to check if the tagger should use composite entity id and object store +package tagger + +import "github.com/DataDog/datadog-agent/pkg/config" + +// ShouldUseCompositeStore indicates whether the tagger should use the default or composite implementation +// of entity ID and object store. 
+// TODO: remove this when we switch over fully to the composite implementation +func ShouldUseCompositeStore() bool { + return config.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") +} diff --git a/release.json b/release.json index 4e765326ced00..c6e75e4722f0d 100644 --- a/release.json +++ b/release.json @@ -3,7 +3,7 @@ "current_milestone": "7.58.0", "last_stable": { "6": "6.53.0", - "7": "7.56.1" + "7": "7.56.2" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", diff --git a/releasenotes-dca/notes/configure-volumes-for-k8s-autoscaler-d96fa7357c4be699.yaml b/releasenotes-dca/notes/configure-volumes-for-k8s-autoscaler-d96fa7357c4be699.yaml new file mode 100644 index 0000000000000..3a189e5474685 --- /dev/null +++ b/releasenotes-dca/notes/configure-volumes-for-k8s-autoscaler-d96fa7357c4be699.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixed an issue that prevented the Kubernetes autoscaler from evicting pods + injected by the Admission Controller. diff --git a/releasenotes-dca/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml b/releasenotes-dca/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml new file mode 100644 index 0000000000000..de06910baa4f3 --- /dev/null +++ b/releasenotes-dca/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml @@ -0,0 +1,16 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. 
+# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + The Kubernetes State Metrics (KSM) check can now be configured to collect + pods from the Kubelet in node agents instead of collecting them from the API + Server in the Cluster Agent or the Cluster check runners. This is useful in + clusters with a large number of pods where emitting pod metrics from a + single check instance can cause performance issues due to the large number + of metrics emitted. diff --git a/releasenotes/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml b/releasenotes/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml new file mode 100644 index 0000000000000..de06910baa4f3 --- /dev/null +++ b/releasenotes/notes/ksm-pods-in-node-agent-13d9041602521d42.yaml @@ -0,0 +1,16 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + The Kubernetes State Metrics (KSM) check can now be configured to collect + pods from the Kubelet in node agents instead of collecting them from the API + Server in the Cluster Agent or the Cluster check runners. This is useful in + clusters with a large number of pods where emitting pod metrics from a + single check instance can cause performance issues due to the large number + of metrics emitted. 
diff --git a/releasenotes/notes/kubelet-core-volume-metrics-fix-a95e22dbc29dea88.yaml b/releasenotes/notes/kubelet-core-volume-metrics-fix-a95e22dbc29dea88.yaml new file mode 100644 index 0000000000000..2e5c587b4576c --- /dev/null +++ b/releasenotes/notes/kubelet-core-volume-metrics-fix-a95e22dbc29dea88.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes issue with the kubelet corecheck where `kubernetes.kubelet.volume.*` metrics + were not properly being reported if any matching namespace exclusion filter was present. diff --git a/tasks/agent.py b/tasks/agent.py index c92f5d8399782..8474176fc244f 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -76,6 +76,7 @@ "orchestrator_ecs", "cisco_sdwan", "network_path", + "service_discovery", ] WINDOWS_CORECHECKS = [ diff --git a/tasks/ebpf.py b/tasks/ebpf.py index 76eb2382b0af0..e84bb9f5183ce 100644 --- a/tasks/ebpf.py +++ b/tasks/ebpf.py @@ -690,13 +690,14 @@ def generate_complexity_summary_for_pr( raise Exit("tabulate is required to print the complexity summary") pr_comment_head = 'eBPF complexity changes' + github = GithubAPI() + prs = list(github.get_pr_for_branch(branch_name)) + has_prs = len(prs) > 0 if branch_name is None: branch_name = get_current_branch(ctx) if base_branch is None: - github = GithubAPI() - prs = list(github.get_pr_for_branch(branch_name)) if len(prs) == 0: print(f"Warning: No PR found for branch {branch_name}, using main branch as base") base_branch = "main" @@ -707,9 +708,9 @@ def generate_complexity_summary_for_pr( base_branch = prs[0].base.ref print(f"Found PR {prs[0].number} for this branch, using base branch {base_branch}") 
- def _exit_or_delete_github_comment(msg: str): - if skip_github_comment: - raise Exit(msg) + def _try_delete_github_comment(msg: str): + if skip_github_comment or not has_prs: + print(f"{msg}: exiting ({skip_github_comment=}, {has_prs=})") else: print(f"{msg}: removing GitHub comment in PR") pr_commenter(ctx, pr_comment_head, delete=True, force_delete=True) @@ -722,7 +723,7 @@ def _exit_or_delete_github_comment(msg: str): current_branch_artifacts_path = Path(current_branch_artifacts_path) complexity_files = list(current_branch_artifacts_path.glob("verifier-complexity-*.tar.gz")) if len(complexity_files) == 0: - _exit_or_delete_github_comment( + _try_delete_github_comment( f"No complexity data found for the current branch at {current_branch_artifacts_path}" ) return @@ -737,7 +738,7 @@ def _exit_or_delete_github_comment(msg: str): main_complexity_files = list(main_branch_complexity_path.glob("verifier-complexity-*")) if len(main_complexity_files) == 0: - _exit_or_delete_github_comment(f"No complexity data found for the main branch at {main_branch_complexity_path}") + _try_delete_github_comment(f"No complexity data found for the main branch at {main_branch_complexity_path}") return # Uncompress all local complexity files, and store the results @@ -795,7 +796,7 @@ def _exit_or_delete_github_comment(msg: str): ) if len(program_complexity) == 0: - _exit_or_delete_github_comment("No complexity data found, skipping report generation") + _try_delete_github_comment("No complexity data found, skipping report generation") return summarized_complexity_changes = [] @@ -904,7 +905,8 @@ def _build_table(orig_rows): print(msg) - if skip_github_comment: + if skip_github_comment or not has_prs: + print("Skipping commenting on PR") return if not has_any_changes: diff --git a/tasks/kernel_matrix_testing/compiler.py b/tasks/kernel_matrix_testing/compiler.py index e2f949d62f8d1..ff62f77ba1581 100644 --- a/tasks/kernel_matrix_testing/compiler.py +++ 
b/tasks/kernel_matrix_testing/compiler.py @@ -23,6 +23,9 @@ AMD64_DEBIAN_KERNEL_HEADERS_URL = "http://deb.debian.org/debian-security/pool/updates/main/l/linux-5.10/linux-headers-5.10.0-0.deb10.28-amd64_5.10.209-2~deb10u1_amd64.deb" ARM64_DEBIAN_KERNEL_HEADERS_URL = "http://deb.debian.org/debian-security/pool/updates/main/l/linux-5.10/linux-headers-5.10.0-0.deb10.28-arm64_5.10.209-2~deb10u1_arm64.deb" +DOCKER_REGISTRY = "486234852809.dkr.ecr.us-east-1.amazonaws.com" +DOCKER_IMAGE_BASE = f"{DOCKER_REGISTRY}/ci/datadog-agent-buildimages/system-probe" + def get_build_image_suffix_and_version() -> tuple[str, str]: gitlab_ci_file = Path(__file__).parent.parent.parent / ".gitlab-ci.yml" @@ -42,7 +45,7 @@ def get_docker_image_name(ctx: Context, container: str) -> str: return data[0]["Config"]["Image"] -def has_ddtool_helpers() -> bool: +def has_docker_auth_helpers() -> bool: docker_config = Path("~/.docker/config.json").expanduser() if not docker_config.exists(): return False @@ -54,8 +57,7 @@ def has_ddtool_helpers() -> bool: # Invalid JSON (or empty file), we don't have the helper return False - available_cred_helpers = set(config.get("credHelpers", {}).values()) - return "ddtool" in available_cred_helpers or "ecr-login" in available_cred_helpers + return DOCKER_REGISTRY in config.get("credHelpers", {}) class CompilerImage: @@ -70,9 +72,8 @@ def name(self): @property def image(self): suffix, version = get_build_image_suffix_and_version() - image_base = "486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/system-probe" - return f"{image_base}_{self.arch.ci_arch}{suffix}:{version}" + return f"{DOCKER_IMAGE_BASE}_{self.arch.ci_arch}{suffix}:{version}" def _check_container_exists(self, allow_stopped=False): if self.ctx.config.run["dry"]: @@ -136,7 +137,7 @@ def start(self) -> None: if res is None or not res.ok: info(f"[!] 
Image {self.image} not found, logging in and pulling...") - if has_ddtool_helpers(): + if has_docker_auth_helpers(): # With ddtool helpers (installed with ddtool auth helpers install), docker automatically # pulls credentials from ddtool, and we require the aws-vault context to pull docker_pull_auth = "aws-vault exec sso-build-stable-developer -- " diff --git a/tasks/kmt.py b/tasks/kmt.py index 993051959ba9a..486ce76fd1976 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -816,6 +816,7 @@ def build_target_packages(filter_packages): if filter_packages == []: return all_packages + filter_packages = [os.path.relpath(p) for p in go_package_dirs(filter_packages, [NPM_TAG, BPF_TAG])] return [pkg for pkg in all_packages if os.path.relpath(pkg) in filter_packages] @@ -878,7 +879,7 @@ def kmt_sysprobe_prepare( filter_pkgs = [] if packages: - filter_pkgs = [os.path.relpath(p) for p in packages.split(",")] + filter_pkgs = packages.split(",") kmt_paths = KMTPaths(stack, arch) nf_path = os.path.join(kmt_paths.arch_dir, "kmt-sysprobe.ninja") @@ -1098,7 +1099,7 @@ def test( pkgs = [] if packages is not None: - pkgs = packages.split(",") + pkgs = [os.path.relpath(p) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])] if run is not None and len(pkgs) > 1: raise Exit("Only a single package can be specified when running specific tests") diff --git a/tasks/modules.py b/tasks/modules.py index 96757a0579592..39cbb81e14242 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -256,6 +256,7 @@ def dependency_path(self, agent_version): "pkg/util/cgroups", independent=True, condition=lambda: sys.platform == "linux", used_by_otel=True ), "pkg/util/common": GoModule("pkg/util/common", independent=True, used_by_otel=True), + "pkg/util/containers/image": GoModule("pkg/util/containers/image", independent=True, used_by_otel=True), "pkg/util/executable": GoModule("pkg/util/executable", independent=True, used_by_otel=True), "pkg/util/filesystem": GoModule("pkg/util/filesystem", 
independent=True, used_by_otel=True), "pkg/util/flavor": GoModule("pkg/util/flavor", independent=True), diff --git a/tasks/unit_tests/testdata/fake_gitlab-ci.yml b/tasks/unit_tests/testdata/fake_gitlab-ci.yml index 0ded780c29fcd..a07aa2828d10c 100644 --- a/tasks/unit_tests/testdata/fake_gitlab-ci.yml +++ b/tasks/unit_tests/testdata/fake_gitlab-ci.yml @@ -881,7 +881,6 @@ workflow: - test/kitchen/site-cookbooks/dd-system-probe-check/**/* - test/kitchen/test/integration/system-probe-test/**/* - test/kitchen/test/integration/win-sysprobe-test/**/* - - .gitlab/functional_test/system_probe_windows.yml - .gitlab/functional_test_sysprobe/system_probe.yml - .gitlab/kernel_version_testing/system_probe.yml - test/new-e2e/system-probe/**/* diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 4a4ecbbbcc29e..cc9857c383b50 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -1,5 +1,5 @@ kubernetes==30.1.0 -datadog-api-client==2.26.0 +datadog-api-client==2.27.0 pyaml==24.7.0 docker==7.1.0 retry==0.9.2 diff --git a/test/fakeintake/aggregator/servicediscoveryAggregator.go b/test/fakeintake/aggregator/servicediscoveryAggregator.go index b834f08fc5156..3ee0a522e01ec 100644 --- a/test/fakeintake/aggregator/servicediscoveryAggregator.go +++ b/test/fakeintake/aggregator/servicediscoveryAggregator.go @@ -30,6 +30,7 @@ type ServiceDiscoveryPayload struct { LastSeen int64 `json:"last_seen"` APMInstrumentation string `json:"apm_instrumentation"` ServiceNameSource string `json:"service_name_source"` + RSSMemory uint64 `json:"rss_memory"` } `json:"payload"` } diff --git a/test/new-e2e/pkg/utils/infra/retriable_errors.go b/test/new-e2e/pkg/utils/infra/retriable_errors.go index d7de61ae684d3..b8d5c27b53195 100644 --- a/test/new-e2e/pkg/utils/infra/retriable_errors.go +++ b/test/new-e2e/pkg/utils/infra/retriable_errors.go @@ -6,17 +6,18 @@ // Package infra implements utilities to interact with a Pulumi 
infrastructure package infra -type retryType string +// RetryType is an enum to specify the type of retry to perform +type RetryType string const ( - reUp retryType = "ReUp" // Retry the up operation - reCreate retryType = "ReCreate" // Retry the up operation after destroying the stack - noRetry retryType = "NoRetry" + ReUp RetryType = "ReUp" // ReUp retries the up operation + ReCreate RetryType = "ReCreate" // ReCreate retries the up operation after destroying the stack + NoRetry RetryType = "NoRetry" // NoRetry does not retry the up operation ) type knownError struct { errorMessage string - retryType retryType + retryType RetryType } func getKnownErrors() []knownError { @@ -24,22 +25,22 @@ func getKnownErrors() []knownError { return []knownError{ { errorMessage: "i/o timeout", - retryType: reCreate, + retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-1 errorMessage: "failed attempts: dial tcp :22: connect: connection refused", - retryType: reCreate, + retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-295 errorMessage: "Resource provider reported that the resource did not exist while updating", - retryType: reCreate, + retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-558 errorMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"", - retryType: reCreate, + retryType: ReCreate, }, } } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go index 4bb12e2486e2a..2a580fd651d7d 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager.go @@ -56,11 +56,16 @@ var ( initStackManager sync.Once ) +// RetryStrategy is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration +type RetryStrategy func(error, int) (RetryType, []GetStackOption) + // StackManager handles type StackManager 
struct { - stacks *safeStackMap - + stacks *safeStackMap knownErrors []knownError + + // RetryStrategy defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden + RetryStrategy RetryStrategy } type safeStackMap struct { @@ -111,10 +116,13 @@ func GetStackManager() *StackManager { } func newStackManager() (*StackManager, error) { - return &StackManager{ + sm := &StackManager{ stacks: newSafeStackMap(), knownErrors: getKnownErrors(), - }, nil + } + sm.RetryStrategy = sm.getRetryStrategyFrom + + return sm, nil } // GetStack creates or return a stack based on stack name and config, if error occurs during stack creation it destroy all the resources created @@ -515,13 +523,13 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu } } - retryStrategy := sm.getRetryStrategyFrom(upError, upCount) + retryStrategy, changedOpts := sm.RetryStrategy(upError, upCount) sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)}) switch retryStrategy { - case reUp: + case ReUp: fmt.Fprintf(logger, "Retrying stack on error during stack up: %v\n", upError) - case reCreate: + case ReCreate: fmt.Fprintf(logger, "Recreating stack on error during stack up: %v\n", upError) destroyCtx, cancel := context.WithTimeout(ctx, params.DestroyTimeout) _, err = stack.Destroy(destroyCtx, progressStreamsDestroyOption, optdestroy.DebugLogging(loggingOptions)) @@ -530,10 +538,27 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu fmt.Fprintf(logger, "Error during stack destroy at recrate stack attempt: %v\n", err) return stack, auto.UpResult{}, err } - case noRetry: + case NoRetry: fmt.Fprintf(logger, "Giving up on error during stack up: %v\n", upError) return stack, upResult, 
upError } + + if len(changedOpts) > 0 { + // apply changed options from retry strategy + for _, opt := range changedOpts { + opt(¶ms) + } + + cm, err = runner.BuildStackParameters(profile, params.Config) + if err != nil { + return nil, auto.UpResult{}, fmt.Errorf("error trying to build new stack options on retry: %s", err) + } + + err = stack.SetAllConfig(ctx, cm.ToPulumi()) + if err != nil { + return nil, auto.UpResult{}, fmt.Errorf("error trying to change stack options on retry: %s", err) + } + } } return stack, upResult, upError @@ -587,19 +612,19 @@ func runFuncWithRecover(f pulumi.RunFunc) pulumi.RunFunc { } } -func (sm *StackManager) getRetryStrategyFrom(err error, upCount int) retryType { +func (sm *StackManager) getRetryStrategyFrom(err error, upCount int) (RetryType, []GetStackOption) { // if first attempt + retries count are higher than max retry, give up if upCount > stackUpMaxRetry { - return noRetry + return NoRetry, nil } for _, knownError := range sm.knownErrors { if strings.Contains(err.Error(), knownError.errorMessage) { - return knownError.retryType + return knownError.retryType, nil } } - return reUp + return ReUp, nil } // sendEventToDatadog sends an event to Datadog, it will use the API Key from environment variable DD_API_KEY if present, otherwise it will use the one from SSM Parameter Store diff --git a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go index 74562bfff59eb..6f6e744e09bcb 100644 --- a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go @@ -25,7 +25,7 @@ func TestLinuxConfigCheckSuite(t *testing.T) { e2e.Run(t, &linuxConfigCheckSuite{}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake())) } -// cpu, disk, file_handle, io, load, memory, network, ntp, uptime +// cpu, disk, file_handle, io, load, memory, network, ntp, uptime, 
service_discovery func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { testChecks := []CheckConfigOutput{ { @@ -82,6 +82,12 @@ func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { InstanceID: "uptime:", Settings: "{}", }, + { + CheckName: "service_discovery", + Filepath: "file:/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml.default", + InstanceID: "service_discovery:", + Settings: "{}", + }, } output := v.Env().Agent.Client.ConfigCheck() diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index 06d0915cf268e..af2f9917a2f9e 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -983,8 +983,13 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, } } + volumesMarkedAsSafeToEvict := strings.Split( + pod.Annotations["cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes"], ",", + ) + if suite.Contains(hostPathVolumes, "datadog") { suite.Equal("/var/run/datadog", hostPathVolumes["datadog"].Path) + suite.Contains(volumesMarkedAsSafeToEvict, "datadog") } volumeMounts := make(map[string][]string) @@ -1006,7 +1011,14 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, } } - suite.Contains(emptyDirVolumes, "datadog-auto-instrumentation") + if suite.Contains(emptyDirVolumes, "datadog-auto-instrumentation") { + suite.Contains(volumesMarkedAsSafeToEvict, "datadog-auto-instrumentation") + } + + if suite.Contains(emptyDirVolumes, "datadog-auto-instrumentation-etc") { + suite.Contains(volumesMarkedAsSafeToEvict, "datadog-auto-instrumentation-etc") + } + if suite.Contains(volumeMounts, "datadog-auto-instrumentation") { suite.ElementsMatch([]string{ "/opt/datadog-packages/datadog-apm-inject", diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index b9fa1d0023a63..2ede187079619 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ 
b/test/new-e2e/tests/discovery/linux_test.go @@ -14,12 +14,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" ) //go:embed testdata/config/agent_config.yaml @@ -92,24 +93,28 @@ func (s *linuxTestSuite) TestServiceDiscoveryCheck() { if assert.NotNil(c, found) { assert.Equal(c, "none", found.Payload.APMInstrumentation) assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.NotZero(c, found.Payload.RSSMemory) } found = foundMap["node-instrumented"] if assert.NotNil(c, found) { assert.Equal(c, "provided", found.Payload.APMInstrumentation) assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.NotZero(c, found.Payload.RSSMemory) } found = foundMap["python.server"] if assert.NotNil(c, found) { assert.Equal(c, "none", found.Payload.APMInstrumentation) assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.NotZero(c, found.Payload.RSSMemory) } found = foundMap["python.instrumented"] if assert.NotNil(c, found) { assert.Equal(c, "provided", found.Payload.APMInstrumentation) assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.NotZero(c, found.Payload.RSSMemory) } assert.Contains(c, foundMap, "json-server") diff --git a/test/new-e2e/tests/discovery/testdata/provision/provision.sh b/test/new-e2e/tests/discovery/testdata/provision/provision.sh index f939003d3664f..9d3c61f06a425 100755 --- a/test/new-e2e/tests/discovery/testdata/provision/provision.sh +++ 
b/test/new-e2e/tests/discovery/testdata/provision/provision.sh @@ -22,9 +22,10 @@ fi export NVM_DIR="$HOME/.nvm" # shellcheck source=/dev/null source "${NVM_DIR}/nvm.sh" -nvm install 20 +# Retry a few times since occasional failures have been seen +nvm install 20 || nvm install 20 || nvm install 20 -npm install json-server +npm install json-server || npm install json-server npm install /home/ubuntu/e2e-test/node/instrumented # Install our own services diff --git a/test/new-e2e/tests/installer/all_packages_test.go b/test/new-e2e/tests/installer/all_packages_test.go index 76fd2272958d3..67d8186196aba 100644 --- a/test/new-e2e/tests/installer/all_packages_test.go +++ b/test/new-e2e/tests/installer/all_packages_test.go @@ -125,6 +125,11 @@ func TestPackages(t *testing.T) { if flavor.Flavor == e2eos.Fedora { flake.Mark(t) } + + // FIXME: Ansible tests are flaky on multiple tests/os + if method == installMethodAnsible { + flake.Mark(t) + } opts := []awshost.ProvisionerOption{ awshost.WithEC2InstanceOptions(ec2.WithOSArch(flavor, flavor.Architecture)), awshost.WithoutAgent(), diff --git a/test/new-e2e/tests/security-agent-functional/security_agent_test.go b/test/new-e2e/tests/security-agent-functional/security_agent_test.go index 283e622f4c913..6606c33a6d600 100644 --- a/test/new-e2e/tests/security-agent-functional/security_agent_test.go +++ b/test/new-e2e/tests/security-agent-functional/security_agent_test.go @@ -12,15 +12,17 @@ import ( "testing" "time" + componentsos "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" windowsCommon 
"github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" - componentsos "github.com/DataDog/test-infra-definitions/components/os" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/stretchr/testify/require" ) type vmSuite struct { @@ -34,6 +36,7 @@ var ( ) func TestVMSuite(t *testing.T) { + flake.Mark(t) suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoAgentNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(componentsos.WindowsDefault))))} if *devMode { suiteParams = append(suiteParams, e2e.WithDevMode()) diff --git a/test/new-e2e/tests/windows/remoteexecutable.go b/test/new-e2e/tests/windows/remoteexecutable.go index 680dea998e3cb..e9895b04fb356 100644 --- a/test/new-e2e/tests/windows/remoteexecutable.go +++ b/test/new-e2e/tests/windows/remoteexecutable.go @@ -11,8 +11,10 @@ import ( "strings" "testing" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" ) // RemoteExecutable is a helper struct to run tests on a remote host @@ -138,15 +140,18 @@ func executeAndLogOutput(t *testing.T, vm *components.RemoteHost, command string outfilename := command + ".out" fullcommand := "cd " + cmdDir + ";" fullcommand += command + " " + strings.Join(args, " ") + " | Out-File -Encoding ASCII -FilePath " + outfilename - _, err := vm.Execute(fullcommand) - require.NoError(t, err) + _, testErr := vm.Execute(fullcommand) // get the output outbytes, err := vm.ReadFile(outfilename) - require.NoError(t, err) // log the output - for _, line := range strings.Split(string(outbytes[:]), "\n") { - t.Logf("TestSuite: %s", line) + if assert.NoError(t, err) { + for _, line := range strings.Split(string(outbytes[:]), "\n") { + t.Logf("TestSuite: %s", line) + } } + + require.NoError(t, 
testErr) + }