From 2f8a0fc55416eeb45b8c2fc17f784439b2a01fae Mon Sep 17 00:00:00 2001 From: Jonathan Ribas Date: Wed, 4 Sep 2024 17:38:16 +0200 Subject: [PATCH 001/128] [CWS] Fix activity dump local storage (#28402) --- .../security_profile/dump/local_storage.go | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/pkg/security/security_profile/dump/local_storage.go b/pkg/security/security_profile/dump/local_storage.go index b47e7266a8e83..a2d3af607eb08 100644 --- a/pkg/security/security_profile/dump/local_storage.go +++ b/pkg/security/security_profile/dump/local_storage.go @@ -85,7 +85,9 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac // remove everything for _, f := range *files { - _ = os.Remove(f) + if err := os.Remove(path.Join(cfg.RuntimeSecurity.ActivityDumpLocalStorageDirectory, f)); err != nil { + seclog.Warnf("Failed to remove dump %s (limit of dumps reach): %v", f, err) + } } adls.deletedCount.Add(1) @@ -118,8 +120,18 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac // ignore this file continue } + // fetch MTime + dumpInfo, err := f.Info() + if err != nil { + seclog.Warnf("Failed to retrieve dump %s file informations: %v", f.Name(), err) + // ignore this file + continue + } // retrieve the basename of the dump dumpName := strings.TrimSuffix(filepath.Base(f.Name()), ext) + if ext == ".gz" { + dumpName = strings.TrimSuffix(dumpName, filepath.Ext(dumpName)) + } // insert the file in the list of dumps ad, ok := localDumps[dumpName] if !ok { @@ -130,11 +142,6 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac localDumps[dumpName] = ad } ad.Files = append(ad.Files, f.Name()) - dumpInfo, err := f.Info() - if err != nil { - // ignore this file - continue - } if !ad.MTime.IsZero() && ad.MTime.Before(dumpInfo.ModTime()) { ad.MTime = dumpInfo.ModTime() } @@ -144,8 +151,7 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac sort.Sort(dumps) // insert the dumps in cache (will trigger clean up if necessary) for _, ad := range dumps { - newFiles := ad.Files - adls.localDumps.Add(ad.Name, &newFiles) + adls.localDumps.Add(ad.Name, &ad.Files) } } From db33c8717b8fc10142a4d7ecb9f785c818fadf40 Mon Sep 17 00:00:00 2001 From: George Hahn Date: Wed, 4 Sep 2024 09:40:08 -0600 Subject: [PATCH 002/128] [SMP] Collect telemetry from checks during SMP runs (#29030) --- .../regression/cases/basic_py_check/datadog-agent/datadog.yaml | 1 + test/regression/cases/file_tree/datadog-agent/datadog.yaml | 1 + test/regression/cases/idle/datadog-agent/datadog.yaml | 1 + .../cases/otel_to_otel_logs/datadog-agent/datadog.yaml | 1 + .../cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml | 1 + .../tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml | 1 + .../cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml | 1 + .../cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml | 1 + .../cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml | 3 +++ 9 files changed, 11 insertions(+) diff --git a/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml b/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml index 88543e76b074d..7c77a5477d6a5 100644 --- a/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml +++ b/test/regression/cases/basic_py_check/datadog-agent/datadog.yaml @@ -6,6 +6,7 @@ auth_token_file_path: /tmp/agent-auth-token cloud_provider_metadata: [] telemetry.enabled: true +telemetry.checks: '*' dd_url: http://localhost:9091 
process_config.process_dd_url: http://localhost:9092 diff --git a/test/regression/cases/file_tree/datadog-agent/datadog.yaml b/test/regression/cases/file_tree/datadog-agent/datadog.yaml index 2d30e86cc40d5..986d0adcf6d84 100644 --- a/test/regression/cases/file_tree/datadog-agent/datadog.yaml +++ b/test/regression/cases/file_tree/datadog-agent/datadog.yaml @@ -9,4 +9,5 @@ logs_enabled: true dd_url: http://127.0.0.1:9092 telemetry.enabled: true +telemetry.checks: '*' process_collection.enabled: false diff --git a/test/regression/cases/idle/datadog-agent/datadog.yaml b/test/regression/cases/idle/datadog-agent/datadog.yaml index 3b61ada83da6e..1b9b15d83f17a 100644 --- a/test/regression/cases/idle/datadog-agent/datadog.yaml +++ b/test/regression/cases/idle/datadog-agent/datadog.yaml @@ -9,3 +9,4 @@ process_config.process_dd_url: http://localhost:9092 cloud_provider_metadata: [] telemetry.enabled: true +telemetry.checks: '*' diff --git a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml index 66b8ab0b4da1a..35593c48fbd5c 100644 --- a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml +++ b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml @@ -9,6 +9,7 @@ dd_url: http://127.0.0.1:9092 process_config.process_dd_url: http://localhost:9093 telemetry.enabled: true +telemetry.checks: '*' apm_config: enabled: true diff --git a/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml b/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml index 661f27dda68dc..3847663675241 100644 --- a/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml +++ b/test/regression/cases/pycheck_lots_of_tags/datadog-agent/datadog.yaml @@ -6,6 +6,7 @@ auth_token_file_path: /tmp/agent-auth-token cloud_provider_metadata: [] telemetry.enabled: true +telemetry.checks: '*' memtrack_enabled: false diff --git a/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml b/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml index 01c444918dbe5..e6d51841b1586 100644 --- a/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml +++ b/test/regression/cases/tcp_dd_logs_filter_exclude/datadog-agent/datadog.yaml @@ -4,6 +4,7 @@ dd_url: http://localhost:9092 process_config.process_dd_url: http://localhost:9093 telemetry.enabled: true +telemetry.checks: '*' # Disable cloud detection. This stops the Agent from poking around the # execution environment & network. This is particularly important if the target diff --git a/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml b/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml index 6320ce2701c40..96e2d4f537c0e 100644 --- a/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml +++ b/test/regression/cases/tcp_syslog_to_blackhole/datadog-agent/datadog.yaml @@ -4,6 +4,7 @@ dd_url: http://localhost:9091 process_config.process_dd_url: http://localhost:9093 telemetry.enabled: true +telemetry.checks: '*' # Disable cloud detection. This stops the Agent from poking around the # execution environment & network. 
This is particularly important if the target diff --git a/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml b/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml index cadf5be194ac6..5eb8f41ad2588 100644 --- a/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml +++ b/test/regression/cases/uds_dogstatsd_to_api/datadog-agent/datadog.yaml @@ -5,6 +5,7 @@ dd_url: http://127.0.0.1:9091 process_config.process_dd_url: http://localhost:9092 telemetry.enabled: true +telemetry.checks: '*' # Disable cloud detection. This stops the Agent from poking around the # execution environment & network. This is particularly important if the target diff --git a/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml b/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml index 28f7330c6d81d..a9f1cb85f20b1 100644 --- a/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml +++ b/test/regression/cases/uds_dogstatsd_to_api_cpu/datadog-agent/datadog.yaml @@ -3,6 +3,9 @@ auth_token_file_path: /tmp/agent-auth-token dd_url: http://127.0.0.1:9091 process_config.process_dd_url: http://localhost:9092 +telemetry.enabled: true +telemetry.checks: '*' + # Disable cloud detection. This stops the Agent from poking around the # execution environment & network. This is particularly important if the target # has network access. From ed455d043e37c0cf0f3ebc7c690455f37d4024b4 Mon Sep 17 00:00:00 2001 From: Mackenzie <63265430+mackjmr@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:49:24 +0200 Subject: [PATCH 003/128] Include zpages data in flare (#28927) --- LICENSE-3rdparty.csv | 18 --------- .../collector/impl-pipeline/flare_filler.go | 39 ------------------- .../impl-pipeline/flare_filler_test.go | 21 +++------- comp/otelcol/ddflareextension/def/types.go | 3 +- comp/otelcol/ddflareextension/impl/config.go | 24 ++++++------ .../ddflareextension/impl/config_test.go | 12 ++---- .../ddflareextension/impl/extension.go | 33 +++++++++++----- go.mod | 9 ----- go.sum | 35 ----------------- 9 files changed, 44 insertions(+), 150 deletions(-) diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 79e35081214b1..8701e0696d2b5 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -275,7 +275,6 @@ core,github.com/ProtonMail/go-crypto/openpgp/internal/ecc,BSD-3-Clause,Copyright core,github.com/ProtonMail/go-crypto/openpgp/internal/encoding,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,github.com/ProtonMail/go-crypto/openpgp/packet,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,github.com/ProtonMail/go-crypto/openpgp/s2k,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,github.com/PuerkitoBio/goquery,BSD-3-Clause,"Copyright (c) 2012-2021, Martin Angers & Contributors" core,github.com/Showmax/go-fqdn,Apache-2.0,Copyright since 2015 Showmax s.r.o core,github.com/StackExchange/wmi,MIT,Copyright (c) 2013 Stack Exchange core,github.com/VividCortex/ewma,MIT,"Copyright (c) 2013 VividCortex | Copyright (c) 2013 VividCortex, Inc. All rights reserved" @@ -290,10 +289,6 @@ core,github.com/alecthomas/participle/v2,MIT,Copyright (C) 2017 Alec Thomas | Co core,github.com/alecthomas/participle/v2/lexer,MIT,Copyright (C) 2017 Alec Thomas | Copyright (C) 2017-2022 Alec Thomas core,github.com/alecthomas/units,MIT,Copyright (C) 2014 Alec Thomas core,github.com/anchore/go-struct-converter,Apache-2.0,"Copyright (c) 2022-2023 Anchore, Inc." 
-core,github.com/andybalholm/cascadia,BSD-2-Clause,Copyright (c) 2011 Andy Balholm. All rights reserved -core,github.com/antchfx/htmlquery,MIT,Copyright (c) 2016 Zheng Chun -core,github.com/antchfx/xmlquery,MIT,Copyright (c) 2016 Zheng Chun -core,github.com/antchfx/xpath,MIT,Copyright (c) 2016 Zheng Chun core,github.com/antlr/antlr4/runtime/Go/antlr/v4,BSD-3-Clause,Copyright 2021 The ANTLR Project core,github.com/apache/thrift/lib/go/thrift,Apache-2.0,"Copyright (C) 2006 - 2019, The Apache Software Foundation | Copyright (c) 2006- Facebook | Copyright (c) 2006-2008 Alexander Chemeris | Copyright (c) 2007 Thomas Porschberg | Copyright (c) 2008- Patrick Collison | Copyright 2007 by Nathan C. Myers ; some rights reserved | Copyright 2012 Twitter, Inc" core,github.com/aquasecurity/go-gem-version,Apache-2.0,Copyright (c) 2020 Teppei Fukuda (knqyf263) @@ -1078,9 +1073,6 @@ core,github.com/goccy/go-json/internal/encoder/vm_color_indent,MIT,Copyright (c) core,github.com/goccy/go-json/internal/encoder/vm_indent,MIT,Copyright (c) 2020 Masaaki Goshima core,github.com/goccy/go-json/internal/errors,MIT,Copyright (c) 2020 Masaaki Goshima core,github.com/goccy/go-json/internal/runtime,MIT,Copyright (c) 2020 Masaaki Goshima -core,github.com/gocolly/colly/v2,Apache-2.0,Copyright 2018 Adam Tauber -core,github.com/gocolly/colly/v2/debug,Apache-2.0,Copyright 2018 Adam Tauber -core,github.com/gocolly/colly/v2/storage,Apache-2.0,Copyright 2018 Adam Tauber core,github.com/gocomply/scap/pkg/scap/constants,CC0-1.0,CC0 1.0 Universal core,github.com/gocomply/scap/pkg/scap/models/cdf,CC0-1.0,CC0 1.0 Universal core,github.com/gocomply/scap/pkg/scap/models/cpe,CC0-1.0,CC0 1.0 Universal @@ -1391,7 +1383,6 @@ core,github.com/justincormack/go-memfd/msyscall,MIT,Copyright (c) 2017 Justin Co core,github.com/kardianos/osext,BSD-3-Clause,Copyright (c) 2012 The Go Authors. All rights reserved core,github.com/karrick/godirwalk,BSD-2-Clause,"Copyright (c) 2017, Karrick McDermott" core,github.com/kballard/go-shellquote,MIT,Copyright (C) 2014 Kevin Ballard -core,github.com/kennygrant/sanitize,BSD-3-Clause,Copyright (c) 2017 Mechanism Design. All rights reserved core,github.com/kevinburke/ssh_config,MIT,"Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton | Copyright (c) 2017 Kevin Burke" core,github.com/kjk/lzma,BSD-3-Clause,"Copyright (c) 2010, Andrei Vieru. All rights reserved" core,github.com/klauspost/compress,BSD-3-Clause,Copyright (c) 2011 The Snappy-Go Authors. All rights reserved | Copyright (c) 2012 The Go Authors. All rights reserved | Copyright (c) 2015 Klaus Post | Copyright (c) 2019 Klaus Post. All rights reserved | Copyright 2016 The filepathx Authors | Copyright 2016-2017 The New York Times Company @@ -1993,7 +1984,6 @@ core,github.com/rs/zerolog/log,MIT,Copyright (c) 2017 Olivier Poitrey core,github.com/ryanuber/go-glob,MIT,Copyright (c) 2014 Ryan Uber core,github.com/sagikazarmark/locafero,MIT,Copyright (c) 2023 Márk Sági-Kazár core,github.com/sagikazarmark/slog-shim,BSD-3-Clause,Copyright (c) 2009 The Go Authors. 
All rights reserved -core,github.com/saintfish/chardet,MIT,Copyright (c) 2012 chardet Authors | Sheng Yu (yusheng dot sjtu at gmail dot com) core,github.com/samber/lo,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber) core,github.com/samber/lo/internal/constraints,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber) core,github.com/samber/lo/internal/rand,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber) @@ -2132,7 +2122,6 @@ core,github.com/syndtr/goleveldb/leveldb/table,BSD-2-Clause,Copyright 2012 Surya core,github.com/syndtr/goleveldb/leveldb/util,BSD-2-Clause,Copyright 2012 Suryandaru Triandana core,github.com/tchap/go-patricia/v2/patricia,MIT,Copyright (c) 2014 The AUTHORS | Ondřej Kupka | This is the complete list of go-patricia copyright holders: core,github.com/tedsuo/rata,MIT,Copyright (c) 2014 Ted Young -core,github.com/temoto/robotstxt,MIT,Copyright (c) 2010 Sergey Shepelev core,github.com/tetratelabs/wazero,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/api,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/experimental,Apache-2.0,Copyright 2020-2023 wazero authors @@ -2754,13 +2743,6 @@ core,google.golang.org/api/transport,BSD-3-Clause,Copyright (c) 2011 Google Inc. core,google.golang.org/api/transport/grpc,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved. core,google.golang.org/api/transport/http,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved. core,google.golang.org/api/transport/http/internal/propagation,BSD-3-Clause,Copyright (c) 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal/base,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal/datastore,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal/log,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal/remote_api,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/internal/urlfetch,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. -core,google.golang.org/appengine/urlfetch,Apache-2.0,Copyright 2011 Google Inc. All rights reserved. 
core,google.golang.org/genproto/googleapis/api,Apache-2.0,Copyright 2015 Google LLC core,google.golang.org/genproto/googleapis/api/annotations,Apache-2.0,Copyright 2015 Google LLC core,google.golang.org/genproto/googleapis/api/expr/v1alpha1,Apache-2.0,Copyright 2015 Google LLC diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler.go b/comp/otelcol/collector/impl-pipeline/flare_filler.go index b574a81cb5af1..109429fb4bb9d 100644 --- a/comp/otelcol/collector/impl-pipeline/flare_filler.go +++ b/comp/otelcol/collector/impl-pipeline/flare_filler.go @@ -20,8 +20,6 @@ import ( "strings" "time" - "github.com/gocolly/colly/v2" - flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" extension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -106,48 +104,11 @@ func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error { fb.AddFile(fmt.Sprintf("otel/otel-flare/%s.dat", name), data) } - if !src.Crawl { - continue - } - - // crawl the url by following any hyperlinks - col := colly.NewCollector() - col.OnHTML("a", func(e *colly.HTMLElement) { - // visit all links - link := e.Attr("href") - if err := e.Request.Visit(e.Request.AbsoluteURL(link)); err != nil { - filename := strings.ReplaceAll(url.PathEscape(link), ":", "_") - fb.AddFile(fmt.Sprintf("otel/otel-flare/crawl-%s.err", filename), []byte(err.Error())) - } - }) - col.OnResponse(func(r *colly.Response) { - // the root sources (from the extension.Response) were already fetched earlier - // don't re-fetch them - responseURL := r.Request.URL.String() - if contains(sourceURLs, responseURL) { - return - } - // use the url as the basis for the filename saved in the flare - filename := strings.ReplaceAll(url.PathEscape(responseURL), ":", "_") - fb.AddFile(fmt.Sprintf("otel/otel-flare/crawl-%s", filename), r.Body) - }) - if err := col.Visit(sourceURL); err != nil { - fb.AddFile("otel/otel-flare/crawl.err", []byte(err.Error())) - } } } return nil } -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} - func toJSON(it interface{}) string { data, err := json.Marshal(it) if err != nil { diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go index fe23d57bbb773..ed98632b0f171 100644 --- a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go +++ b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go @@ -11,7 +11,6 @@ package collectorimpl import ( "bytes" - "fmt" "io" "net/http" "net/http/httptest" @@ -41,8 +40,7 @@ func createFakeOTelExtensionHTTPServer() (string, func()) { io.WriteString(w, "data-source-2") return } else if r.URL.Path == "/three" { - pageTmpl := `Another source is here` - io.WriteString(w, fmt.Sprintf(pageTmpl, testServerURL)) + io.WriteString(w, "data-source-3") return } else if r.URL.Path == "/four" { io.WriteString(w, "data-source-4") @@ -93,27 +91,23 @@ func TestOTelExtFlareBuilder(t *testing.T) { "prometheus": { "url": [ "{{.url}}/one" - ], - "crawl": false + ] }, "health_check": { "url": [ "{{.url}}/two" - ], - "crawl": false + ] }, "zpages": { "url": [ "{{.url}}/three" - ], - "crawl": true + ] }, "pprof": { "url": [ "{{.url}}/four", "{{.url}}/five/six" - ], - "crawl": false + ] } }, "environment": {{.environment}} @@ -150,12 +144,9 @@ func TestOTelExtFlareBuilder(t *testing.T) { f.AssertFileExists("otel", "otel-response.json") - // Template for the crawable page - pageTmpl := `Another 
source is here` - f.AssertFileContent("data-source-1", "otel/otel-flare/prometheus_one.dat") f.AssertFileContent("data-source-2", "otel/otel-flare/health_check_two.dat") - f.AssertFileContent(fmt.Sprintf(pageTmpl, localServerURL), "otel/otel-flare/zpages_three.dat") + f.AssertFileContent("data-source-3", "otel/otel-flare/zpages_three.dat") f.AssertFileContent("data-source-4", "otel/otel-flare/pprof_four.dat") f.AssertFileContent("data-source-5-6", "otel/otel-flare/pprof_five_six.dat") diff --git a/comp/otelcol/ddflareextension/def/types.go b/comp/otelcol/ddflareextension/def/types.go index f25901754fe85..f7002d816225d 100644 --- a/comp/otelcol/ddflareextension/def/types.go +++ b/comp/otelcol/ddflareextension/def/types.go @@ -24,8 +24,7 @@ type ConfigResponse struct { // OTelFlareSource is the response struct for flare debug sources type OTelFlareSource struct { - URLs []string `json:"url"` - Crawl bool `json:"crawl"` + URLs []string `json:"url"` } // DebugSourceResponse is the response struct for a map of OTelFlareSource diff --git a/comp/otelcol/ddflareextension/impl/config.go b/comp/otelcol/ddflareextension/impl/config.go index 456559ff3eb09..e5c7aa8a66aba 100644 --- a/comp/otelcol/ddflareextension/impl/config.go +++ b/comp/otelcol/ddflareextension/impl/config.go @@ -16,15 +16,14 @@ import ( "go.opentelemetry.io/collector/confmap" ) -type extractDebugEndpoint func(conf *confmap.Conf) (string, bool, error) +type extractDebugEndpoint func(conf *confmap.Conf) (string, error) var ( errHTTPEndpointRequired = errors.New("http endpoint required") supportedDebugExtensions = map[string]extractDebugEndpoint{ "health_check": healthExtractEndpoint, - // disabled zpages from flare until solution to display data. - // "zpages": zPagesExtractEndpoint, - "pprof": pprofExtractEndpoint, + "zpages": zPagesExtractEndpoint, + "pprof": pprofExtractEndpoint, } ) @@ -58,20 +57,19 @@ func (c *Config) Unmarshal(conf *confmap.Conf) error { return nil } -// todo: uncomment once zpages data is re-added to flare -// func zPagesExtractEndpoint(c *confmap.Conf) (string, bool, error) { -// endpoint, err := regularStringEndpointExtractor(c) -// return endpoint, true, err -// } +func zPagesExtractEndpoint(c *confmap.Conf) (string, error) { + endpoint, err := regularStringEndpointExtractor(c) + return endpoint, err +} -func pprofExtractEndpoint(c *confmap.Conf) (string, bool, error) { +func pprofExtractEndpoint(c *confmap.Conf) (string, error) { endpoint, err := regularStringEndpointExtractor(c) - return endpoint, false, err + return endpoint, err } -func healthExtractEndpoint(c *confmap.Conf) (string, bool, error) { +func healthExtractEndpoint(c *confmap.Conf) (string, error) { endpoint, err := regularStringEndpointExtractor(c) - return endpoint, false, err + return endpoint, err } func regularStringEndpointExtractor(c *confmap.Conf) (string, error) { diff --git a/comp/otelcol/ddflareextension/impl/config_test.go b/comp/otelcol/ddflareextension/impl/config_test.go index 1c1aa99056858..b3c15eab8a784 100644 --- a/comp/otelcol/ddflareextension/impl/config_test.go +++ b/comp/otelcol/ddflareextension/impl/config_test.go @@ -73,21 +73,15 @@ func TestExtractors(t *testing.T) { myConfMap := confmap.NewFromStringMap(m) - for extension, extractor := range supportedDebugExtensions { - expectedCrawl := false - if extension == "zpages" { - expectedCrawl = true - } - - uri, crawl, err := extractor(myConfMap) + for _, extractor := range supportedDebugExtensions { + uri, err := extractor(myConfMap) assert.NoError(t, err) - 
assert.Equal(t, expectedCrawl, crawl) assert.Equal(t, endpoint, uri) } myConfMap = confmap.New() for _, extractor := range supportedDebugExtensions { - _, _, err := extractor(myConfMap) + _, err := extractor(myConfMap) assert.Error(t, err) } diff --git a/comp/otelcol/ddflareextension/impl/extension.go b/comp/otelcol/ddflareextension/impl/extension.go index e1cc0b2b9e743..fca2149b9544d 100644 --- a/comp/otelcol/ddflareextension/impl/extension.go +++ b/comp/otelcol/ddflareextension/impl/extension.go @@ -87,23 +87,36 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error { continue } - uri, crawl, err := extractor(exconf) + uri, err := extractor(exconf) var uris []string - if extension.Type().String() == "pprof" { - uris = []string{uri + "/debug/pprof/heap", uri + "/debug/pprof/allocs", uri + "/debug/pprof/profile"} - } else { + switch extension.Type().String() { + case "pprof": + uris = []string{ + uri + "/debug/pprof/heap", + uri + "/debug/pprof/allocs", + uri + "/debug/pprof/profile", + } + case "zpages": + uris = []string{ + uri + "/debug/servicez", + uri + "/debug/pipelinez", + uri + "/debug/extensionz", + uri + "/debug/featurez", + uri + "/debug/tracez", + } + default: uris = []string{uri} } if err != nil { ext.telemetry.Logger.Info("Unavailable debug extension for", zap.String("extension", extension.String())) - } else { - ext.telemetry.Logger.Info("Found debug extension at", zap.String("uri", uri)) - ext.debug.Sources[extension.String()] = extensionDef.OTelFlareSource{ - URLs: uris, - Crawl: crawl, - } + continue + } + + ext.telemetry.Logger.Info("Found debug extension at", zap.String("uri", uri)) + ext.debug.Sources[extension.String()] = extensionDef.OTelFlareSource{ + URLs: uris, } } diff --git a/go.mod b/go.mod index ded7d7e482a48..0117a492a0912 100644 --- a/go.mod +++ b/go.mod @@ -712,7 +712,6 @@ require ( github.com/elastic/go-seccomp-bpf v1.4.0 github.com/fatih/structtag v1.2.0 github.com/glaslos/ssdeep v0.4.0 - github.com/gocolly/colly/v2 v2.1.0 github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6 github.com/godror/godror v0.37.0 github.com/jackc/pgx/v5 v5.6.0 @@ -774,16 +773,11 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect github.com/Intevation/gval v1.3.0 // indirect github.com/Intevation/jsonpath v0.2.1 // indirect - github.com/PuerkitoBio/goquery v1.8.1 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect - github.com/andybalholm/cascadia v1.3.2 // indirect - github.com/antchfx/htmlquery v1.3.0 // indirect - github.com/antchfx/xmlquery v1.3.1 // indirect - github.com/antchfx/xpath v1.2.3 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/apache/thrift v0.20.0 // indirect github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 // indirect @@ -855,7 +849,6 @@ require ( github.com/jaegertracing/jaeger v1.58.1 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/kennygrant/sanitize v1.2.4 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect @@ -937,7 
+930,6 @@ require ( github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect github.com/shirou/gopsutil/v4 v4.24.5 // indirect @@ -953,7 +945,6 @@ require ( github.com/stormcat24/protodep v0.1.8 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/swaggest/refl v1.3.0 // indirect - github.com/temoto/robotstxt v1.1.1 // indirect github.com/tetratelabs/wazero v1.7.0 // indirect github.com/tidwall/gjson v1.17.1 // indirect github.com/tidwall/match v1.1.1 // indirect diff --git a/go.sum b/go.sum index 6ade4f6e537a8..f239ac2747fbf 100644 --- a/go.sum +++ b/go.sum @@ -793,9 +793,6 @@ github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8 github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= -github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= -github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= @@ -836,24 +833,8 @@ github.com/alicebob/miniredis/v2 v2.31.1/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CAS github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= -github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY= -github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA= -github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= -github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/antchfx/htmlquery v1.2.3/go.mod h1:B0ABL+F5irhhMWg54ymEZinzMSi0Kt3I2if0BLYa3V0= -github.com/antchfx/htmlquery v1.3.0 h1:5I5yNFOVI+egyia5F2s/5Do2nFWxJz41Tr3DyfKD25E= -github.com/antchfx/htmlquery v1.3.0/go.mod h1:zKPDVTMhfOmcwxheXUsx4rKJy8KEY/PU6eXr/2SebQ8= -github.com/antchfx/xmlquery v1.2.4/go.mod h1:KQQuESaxSlqugE2ZBcM/qn+ebIpt+d+4Xx7YcSGAIrM= -github.com/antchfx/xmlquery v1.3.1 h1:nIKWdtnhrXtj0/IRUAAw2I7TfpHUa3zMnHvNmPXFg+w= -github.com/antchfx/xmlquery v1.3.1/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= -github.com/antchfx/xpath v1.1.6/go.mod 
h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= -github.com/antchfx/xpath v1.1.8/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= -github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= -github.com/antchfx/xpath v1.2.3 h1:CCZWOzv5bAqjVv0offZ2LVgVYFbeldKQVuLNbViZdes= -github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -1400,9 +1381,6 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= -github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA= -github.com/gocolly/colly/v2 v2.1.0 h1:k0DuZkDoCsx51bKpRJNEmcxcp+W5N8ziuwGaSDuFoGs= -github.com/gocolly/colly/v2 v2.1.0/go.mod h1:I2MuhsLjQ+Ex+IzK3afNS8/1qP3AedHOusRPcRdC5o0= github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6 h1:u1QKTc+GgWnBO1Mo0CwQ/4DXElFmSvNKRspxAr+AJuY= github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6/go.mod h1:ifGf7cSYIibtw3UXJy7QlbR8kJE6giDk7vGyCQZv0zo= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -1761,7 +1739,6 @@ github.com/jaegertracing/jaeger v1.58.1 h1:bFtX70yQbBfRbS8TB1JL4/ENr/qR09VJMeC/C github.com/jaegertracing/jaeger v1.58.1/go.mod h1:2qpJpm9BzpbxNpaillaCA4pvdAIRTJT0ZRxrzMglBlo= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= -github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -1809,8 +1786,6 @@ github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwS github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o= -github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -2355,8 +2330,6 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= 
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI= -github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= @@ -2460,7 +2433,6 @@ github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM= github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -2500,8 +2472,6 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBN github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tedsuo/rata v1.0.0 h1:Sf9aZrYy6ElSTncjnGkyC2yuVvz5YJetBIUKJ4CmeKE= github.com/tedsuo/rata v1.0.0/go.mod h1:X47ELzhOoLbfFIY0Cql9P6yo3Cdwf2CMX3FVZxRzJPc= -github.com/temoto/robotstxt v1.1.1 h1:Gh8RCs8ouX3hRSxxK7B1mO5RFByQ4CmJZDwgom++JaA= -github.com/temoto/robotstxt v1.1.1/go.mod h1:+1AmkuG3IYkh1kv0d2qEB9Le88ehNO0zwOr3ujewlOo= github.com/terminalstatic/go-xsd-validate v0.1.5 h1:RqpJnf6HGE2CB/lZB1A8BYguk8uRtcvYAPLCF15qguo= github.com/terminalstatic/go-xsd-validate v0.1.5/go.mod h1:18lsvYFofBflqCrvo1umpABZ99+GneNTw2kEEc8UPJw= github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= @@ -2951,7 +2921,6 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2981,16 +2950,13 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -3009,7 +2975,6 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= From 2ad1061647c0e732a18641247f4995cd453341bc Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Wed, 4 Sep 2024 18:02:14 +0200 Subject: [PATCH 004/128] [CWS] Allow syscall tester to be built with gcc (#29021) --- tasks/security_agent.py | 29 +++++++++++++++++------------ tasks/system_probe.py | 6 +++--- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/tasks/security_agent.py b/tasks/security_agent.py index d2d97597b4cb9..9b38f03ad6f07 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -239,7 +239,7 @@ def build_go_syscall_tester(ctx, build_dir): return syscall_tester_exe_file -def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=None, static=True): +def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=None, static=True, compiler='clang'): if flags is None: flags = [] if libs is None: @@ -256,11 +256,11 @@ def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=Non nw.build( inputs=[syscall_tester_c_file], outputs=[syscall_tester_exe_file], - rule="execlang", + rule="exe" + compiler, variables={ "exeflags": flags, "exelibs": libs, - "flags": [f"-D__{uname_m}__", f"-isystem/usr/include/{uname_m}-linux-gnu"], + "flags": [f"-isystem/usr/include/{uname_m}-linux-gnu"], }, ) return syscall_tester_exe_file @@ -307,12 +307,16 @@ def 
build_embed_latency_tools(ctx, static=True): ctx.run(f"ninja -f {nf_path}") -def ninja_syscall_x86_tester(ctx, build_dir, static=True): - return ninja_c_syscall_tester_common(ctx, "syscall_x86_tester", build_dir, flags=["-m32"], static=static) +def ninja_syscall_x86_tester(ctx, build_dir, static=True, compiler='clang'): + return ninja_c_syscall_tester_common( + ctx, "syscall_x86_tester", build_dir, flags=["-m32"], static=static, compiler=compiler + ) -def ninja_syscall_tester(ctx, build_dir, static=True): - return ninja_c_syscall_tester_common(ctx, "syscall_tester", build_dir, libs=["-lpthread"], static=static) +def ninja_syscall_tester(ctx, build_dir, static=True, compiler='clang'): + return ninja_c_syscall_tester_common( + ctx, "syscall_tester", build_dir, libs=["-lpthread"], static=static, compiler=compiler + ) def create_dir_if_needed(dir): @@ -324,7 +328,7 @@ def create_dir_if_needed(dir): @task -def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True): +def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True, compiler="clang"): arch = Arch.from_str(arch) check_for_ninja(ctx) build_dir = os.path.join("pkg", "security", "tests", "syscall_tester", "bin") @@ -335,11 +339,11 @@ def build_embed_syscall_tester(ctx, arch: str | Arch = CURRENT_ARCH, static=True with open(nf_path, 'w') as ninja_file: nw = NinjaWriter(ninja_file, width=120) ninja_define_ebpf_compiler(nw, arch=arch) - ninja_define_exe_compiler(nw) + ninja_define_exe_compiler(nw, compiler=compiler) - ninja_syscall_tester(nw, build_dir, static=static) + ninja_syscall_tester(nw, build_dir, static=static, compiler=compiler) if arch == ARCH_AMD64: - ninja_syscall_x86_tester(nw, build_dir, static=static) + ninja_syscall_x86_tester(nw, build_dir, static=static, compiler=compiler) ninja_ebpf_probe_syscall_tester(nw, go_dir) ctx.run(f"ninja -f {nf_path}") @@ -362,6 +366,7 @@ def build_functional_tests( kernel_release=None, debug=False, skip_object_files=False, + syscall_tester_compiler='clang', ): if not is_windows: if not skip_object_files: @@ -373,7 +378,7 @@ def build_functional_tests( debug=debug, bundle_ebpf=bundle_ebpf, ) - build_embed_syscall_tester(ctx) + build_embed_syscall_tester(ctx, compiler=syscall_tester_compiler) arch = Arch.from_str(arch) ldflags, gcflags, env = get_build_flags(ctx, major_version=major_version, static=static, arch=arch) diff --git a/tasks/system_probe.py b/tasks/system_probe.py index fa89df422bafb..4714474f5b564 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -141,10 +141,10 @@ def ninja_define_co_re_compiler(nw: NinjaWriter, arch: Arch | None = None): ) -def ninja_define_exe_compiler(nw: NinjaWriter): +def ninja_define_exe_compiler(nw: NinjaWriter, compiler='clang'): nw.rule( - name="execlang", - command="clang -MD -MF $out.d $exeflags $flags $in -o $out $exelibs", + name="exe" + compiler, + command=f"{compiler} -MD -MF $out.d $exeflags $flags $in -o $out $exelibs", depfile="$out.d", ) From 9b6988a83f619f648053c76a0364f83b72ca35f7 Mon Sep 17 00:00:00 2001 From: Olivier G <52180542+ogaca-dd@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:19:21 +0200 Subject: [PATCH 005/128] [ASCII-2246] Log Fx Events using logger.Debug (#28925) --- .../subcommands/daemon/run_windows.go | 5 +- cmd/internal/runcmd/runcmd.go | 4 +- cmd/internal/runcmd/runcmd_test.go | 12 ---- cmd/process-agent/command/main_common.go | 2 +- cmd/serverless/dependencies_linux_amd64.txt | 1 + cmd/serverless/dependencies_linux_arm64.txt | 1 + comp/core/log/fx-systemprobe/fx.go 
| 3 + comp/core/log/fx-trace/fx.go | 3 + comp/core/log/fx/fx.go | 3 + comp/snmptraps/server/serverimpl/server.go | 3 +- pkg/util/fxutil/logging.go | 26 -------- pkg/util/fxutil/logging/logging.go | 61 +++++++++++++++++++ pkg/util/fxutil/oneshot.go | 2 +- pkg/util/fxutil/provide_comp.go | 14 +++-- pkg/util/fxutil/run.go | 2 +- pkg/util/fxutil/test.go | 16 ++--- 16 files changed, 97 insertions(+), 61 deletions(-) delete mode 100644 pkg/util/fxutil/logging.go create mode 100644 pkg/util/fxutil/logging/logging.go diff --git a/cmd/installer/subcommands/daemon/run_windows.go b/cmd/installer/subcommands/daemon/run_windows.go index 57d13a5b5ef53..4559addad1fe1 100644 --- a/cmd/installer/subcommands/daemon/run_windows.go +++ b/cmd/installer/subcommands/daemon/run_windows.go @@ -9,6 +9,8 @@ package daemon import ( "context" + "syscall" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/core/pid" "github.com/DataDog/datadog-agent/comp/updater/localapi" @@ -16,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/judwhite/go-svc" "go.uber.org/fx" - "syscall" ) type windowsService struct { @@ -26,7 +27,7 @@ type windowsService struct { func getFxOptions(global *command.GlobalParams) []fx.Option { return []fx.Option{ getCommonFxOption(global), - fxutil.FxAgentBase(), + fxutil.FxAgentBase(true), // Force the instantiation of some components fx.Invoke(func(_ pid.Component) {}), fx.Invoke(func(_ localapi.Component) {}), diff --git a/cmd/internal/runcmd/runcmd.go b/cmd/internal/runcmd/runcmd.go index 6f871fccec364..6b197801df093 100644 --- a/cmd/internal/runcmd/runcmd.go +++ b/cmd/internal/runcmd/runcmd.go @@ -9,7 +9,6 @@ package runcmd import ( "fmt" "io" - "os" "github.com/spf13/cobra" "go.uber.org/dig" @@ -36,10 +35,9 @@ func Run(cmd *cobra.Command) int { // these are simply printed with an "Error: " prefix, but some kinds of errors // are first simplified to reduce user confusion. func displayError(err error, w io.Writer) { - _, traceFxSet := os.LookupEnv("TRACE_FX") // RootCause returns the error it was given if it cannot find a "root cause", // and otherwise returns the root cause, which is more useful to the user. 
- if rc := dig.RootCause(err); rc != err && !traceFxSet { + if rc := dig.RootCause(err); rc != err { fmt.Fprintln(w, "Error:", rc.Error()) return } diff --git a/cmd/internal/runcmd/runcmd_test.go b/cmd/internal/runcmd/runcmd_test.go index e9aac05348d93..7bd572cd260a4 100644 --- a/cmd/internal/runcmd/runcmd_test.go +++ b/cmd/internal/runcmd/runcmd_test.go @@ -8,8 +8,6 @@ package runcmd import ( "bytes" "errors" - "os" - "regexp" "testing" "github.com/spf13/cobra" @@ -60,16 +58,6 @@ func TestDisplayError_normalError(t *testing.T) { // fx errors are abbreviated to just the root cause by default func TestDisplayError_fxError(t *testing.T) { var buf bytes.Buffer - t.Setenv("TRACE_FX", "") // get testing to reset this value for us - os.Unsetenv("TRACE_FX") // but actually _unset_ the value displayError(makeFxError(t), &buf) require.Equal(t, "Error: uhoh\n", buf.String()) } - -// entire error is included with TRACE_FX set -func TestDisplayError_fxError_TRACE_FX(t *testing.T) { - var buf bytes.Buffer - t.Setenv("TRACE_FX", "1") - displayError(makeFxError(t), &buf) - require.Regexp(t, regexp.MustCompile("Error: could not build arguments for function .* uhoh"), buf.String()) -} diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go index fe6f66ef3d88e..73daed01b341b 100644 --- a/cmd/process-agent/command/main_common.go +++ b/cmd/process-agent/command/main_common.go @@ -181,7 +181,7 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error { }), // Provides specific features to our own fx wrapper (logging, lifecycle, shutdowner) - fxutil.FxAgentBase(), + fxutil.FxAgentBase(true), // Set the pid file path fx.Supply(pidimpl.NewParams(globalParams.PidFilePath)), diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index 6059520efe6e5..0817058707727 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -271,6 +271,7 @@ github.com/DataDog/datadog-agent/pkg/util/fargate github.com/DataDog/datadog-agent/pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/flavor github.com/DataDog/datadog-agent/pkg/util/fxutil +github.com/DataDog/datadog-agent/pkg/util/fxutil/logging github.com/DataDog/datadog-agent/pkg/util/hostname github.com/DataDog/datadog-agent/pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index f5373b770a685..31d6006a051e9 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -271,6 +271,7 @@ github.com/DataDog/datadog-agent/pkg/util/fargate github.com/DataDog/datadog-agent/pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/flavor github.com/DataDog/datadog-agent/pkg/util/fxutil +github.com/DataDog/datadog-agent/pkg/util/fxutil/logging github.com/DataDog/datadog-agent/pkg/util/hostname github.com/DataDog/datadog-agent/pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http diff --git a/comp/core/log/fx-systemprobe/fx.go b/comp/core/log/fx-systemprobe/fx.go index 48e883d4ef019..0b948e74336ee 100644 --- a/comp/core/log/fx-systemprobe/fx.go +++ b/comp/core/log/fx-systemprobe/fx.go @@ -7,8 +7,10 @@ package fx import ( + logdef "github.com/DataDog/datadog-agent/comp/core/log/def" logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl-systemprobe" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + 
"github.com/DataDog/datadog-agent/pkg/util/fxutil/logging" ) // Module defines the fx options for this component @@ -17,5 +19,6 @@ func Module() fxutil.Module { fxutil.ProvideComponentConstructor( logimpl.NewComponent, ), + logging.NewFxEventLoggerOption[logdef.Component](), ) } diff --git a/comp/core/log/fx-trace/fx.go b/comp/core/log/fx-trace/fx.go index 91dc0fc968e93..ebfd4d30d75d6 100644 --- a/comp/core/log/fx-trace/fx.go +++ b/comp/core/log/fx-trace/fx.go @@ -7,8 +7,10 @@ package fx import ( + logdef "github.com/DataDog/datadog-agent/comp/core/log/def" impltrace "github.com/DataDog/datadog-agent/comp/core/log/impl-trace" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging" ) // Module defines the fx options for this component @@ -17,5 +19,6 @@ func Module() fxutil.Module { fxutil.ProvideComponentConstructor( impltrace.NewComponent, ), + logging.NewFxEventLoggerOption[logdef.Component](), ) } diff --git a/comp/core/log/fx/fx.go b/comp/core/log/fx/fx.go index 17bc03ca3a8e2..27b07266f1709 100644 --- a/comp/core/log/fx/fx.go +++ b/comp/core/log/fx/fx.go @@ -7,8 +7,10 @@ package fx import ( + logdef "github.com/DataDog/datadog-agent/comp/core/log/def" logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging" ) // Module defines the fx options for this component @@ -17,5 +19,6 @@ func Module() fxutil.Module { fxutil.ProvideComponentConstructor( logimpl.NewComponent, ), + logging.NewFxEventLoggerOption[logdef.Component](), ) } diff --git a/comp/snmptraps/server/serverimpl/server.go b/comp/snmptraps/server/serverimpl/server.go index 269d771b2ee45..6575452402ef4 100644 --- a/comp/snmptraps/server/serverimpl/server.go +++ b/comp/snmptraps/server/serverimpl/server.go @@ -29,6 +29,7 @@ import ( "github.com/DataDog/datadog-agent/comp/snmptraps/status" "github.com/DataDog/datadog-agent/comp/snmptraps/status/statusimpl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging" ) // Module defines the fx options for this component. @@ -95,7 +96,7 @@ func newServer(lc fx.Lifecycle, deps dependencies) provides { // careful never to double-instantiate anything. Do not use this solution // elsewhere if possible. app := fx.New( - fxutil.FxLoggingOption(), + logging.FxLoggingOption(), fx.Supply(injections{ Conf: deps.Conf, HNService: deps.HNService, diff --git a/pkg/util/fxutil/logging.go b/pkg/util/fxutil/logging.go deleted file mode 100644 index 0b17166529ae4..0000000000000 --- a/pkg/util/fxutil/logging.go +++ /dev/null @@ -1,26 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package fxutil - -import ( - "os" - - "go.uber.org/fx" - "go.uber.org/fx/fxevent" -) - -// FxLoggingOption creates an fx.Option to configure the Fx logger, either to do nothing -// (the default) or to log to the console (when TRACE_FX is set). 
-func FxLoggingOption() fx.Option { - return fx.WithLogger( - func() fxevent.Logger { - if os.Getenv("TRACE_FX") == "" { - return fxevent.NopLogger - } - return &fxevent.ConsoleLogger{W: os.Stderr} - }, - ) -} diff --git a/pkg/util/fxutil/logging/logging.go b/pkg/util/fxutil/logging/logging.go new file mode 100644 index 0000000000000..8cb34a694ae11 --- /dev/null +++ b/pkg/util/fxutil/logging/logging.go @@ -0,0 +1,61 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package logging provides a logger that logs fx events. +package logging + +import ( + "go.uber.org/fx" + "go.uber.org/fx/fxevent" +) + +// Logger is a logger that logs fx events. +type Logger interface { + Debug(v ...interface{}) +} + +type fxEventLogger struct { + logger func(v ...interface{}) +} + +// NewFxEventLoggerOption returns an fx option that provides a fxEventLogger. +// Generic is used in order to not depends on the logger package. +func NewFxEventLoggerOption[T Logger]() fx.Option { + // Note: The pointer in *T is needed for `optional:"true"` + return fx.Provide(func(logger *T) *fxEventLogger { + if logger == nil { + return nil + } + return &fxEventLogger{logger: (*logger).Debug} + }) +} + +// Write writes the given bytes to the logger. +func (l *fxEventLogger) Write(p []byte) (n int, err error) { + l.logger(string(p)) + return len(p), nil +} + +type loggingParams struct { + fx.In + + // Note to the reader: Don't use `optional:"true"` except if you truly understand how does it work + // See https://github.com/uber-go/fx/issues/613. It should ideally use only for logging and debuggin purpose. + FxEventLogging *fxEventLogger `optional:"true"` +} + +// FxLoggingOption returns an fx.Option that provides a logger that logs fx events. +// If fxEventLogger is provided, it will be used, otherwise nothing is logged. +// Typically, this logs fx events when log_level is debug or above. +func FxLoggingOption() fx.Option { + return fx.WithLogger( + func(params loggingParams) fxevent.Logger { + if params.FxEventLogging != nil { + return &fxevent.ConsoleLogger{W: params.FxEventLogging} + } + return fxevent.NopLogger + }, + ) +} diff --git a/pkg/util/fxutil/oneshot.go b/pkg/util/fxutil/oneshot.go index f970f13efd4d0..fa602836c050a 100644 --- a/pkg/util/fxutil/oneshot.go +++ b/pkg/util/fxutil/oneshot.go @@ -33,7 +33,7 @@ func OneShot(oneShotFunc interface{}, opts ...fx.Option) error { opts = append(opts, delayedCall.option(), - FxAgentBase(), + FxAgentBase(true), ) // Temporarily increase timeout for all fxutil.OneShot calls until we can better characterize our // start time requirements. Prepend to opts so individual calls can override the timeout. 
diff --git a/pkg/util/fxutil/provide_comp.go b/pkg/util/fxutil/provide_comp.go index e483a4df90666..733db928b35d9 100644 --- a/pkg/util/fxutil/provide_comp.go +++ b/pkg/util/fxutil/provide_comp.go @@ -14,6 +14,7 @@ import ( "unicode/utf8" compdef "github.com/DataDog/datadog-agent/comp/def" + "github.com/DataDog/datadog-agent/pkg/util/fxutil/logging" "go.uber.org/fx" ) @@ -301,12 +302,13 @@ func coerceStructTo(input reflect.Value, outType reflect.Type, oldEmbed, newEmbe } // FxAgentBase returns all of our adapters from compdef types to fx types -func FxAgentBase() fx.Option { - return fx.Options( - FxLoggingOption(), - fx.Provide(newFxLifecycleAdapter), - fx.Provide(newFxShutdownerAdapter), - ) +func FxAgentBase(logFxEvents bool) fx.Option { + options := []fx.Option{fx.Provide(newFxLifecycleAdapter), + fx.Provide(newFxShutdownerAdapter)} + if logFxEvents { + options = append(options, logging.FxLoggingOption()) + } + return fx.Options(options...) } // Lifecycle is a compdef interface compatible with fx.Lifecycle, to provide start/stop hooks diff --git a/pkg/util/fxutil/run.go b/pkg/util/fxutil/run.go index d094927ee7e22..9fb86b3021c92 100644 --- a/pkg/util/fxutil/run.go +++ b/pkg/util/fxutil/run.go @@ -21,7 +21,7 @@ func Run(opts ...fx.Option) error { return fxAppTestOverride(func() {}, opts) } - opts = append(opts, FxAgentBase()) + opts = append(opts, FxAgentBase(true)) // Temporarily increase timeout for all fxutil.Run calls until we can better characterize our // start time requirements. Prepend to opts so individual calls can override the timeout. opts = append( diff --git a/pkg/util/fxutil/test.go b/pkg/util/fxutil/test.go index 4ba94db3faeed..76bffc7711d39 100644 --- a/pkg/util/fxutil/test.go +++ b/pkg/util/fxutil/test.go @@ -35,7 +35,7 @@ func Test[T any](t testing.TB, opts ...fx.Option) T { app := fxtest.New( t, - FxAgentBase(), + FxAgentBase(false), fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))), delayed.option(), fx.Options(opts...), @@ -64,7 +64,7 @@ func TestApp[T any](opts ...fx.Option) (*fx.App, T, error) { }) app := fx.New( - FxAgentBase(), + FxAgentBase(false), delayed.option(), fx.Options(opts...), ) @@ -97,7 +97,7 @@ type appAssertFn func(testing.TB, *fx.App) func TestStart(t testing.TB, opts fx.Option, appAssert appAssertFn, fn interface{}) { delayed := newDelayedFxInvocation(fn) app := fx.New( - FxAgentBase(), + FxAgentBase(false), fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))), delayed.option(), opts, @@ -116,7 +116,7 @@ func TestRun(t *testing.T, f func() error) { var fxFakeAppRan bool fxAppTestOverride = func(_ interface{}, opts []fx.Option) error { fxFakeAppRan = true - opts = append(opts, FxAgentBase()) + opts = append(opts, FxAgentBase(false)) require.NoError(t, fx.ValidateApp(opts...)) return nil } @@ -163,13 +163,13 @@ func TestOneShotSubcommand( require.NoError(t, fx.ValidateApp( append(opts, - FxAgentBase(), + FxAgentBase(false), fx.Invoke(oneShotFunc))...)) // build an app without the oneShotFunc, and with verifyFn app := fxtest.New(t, append(opts, - FxAgentBase(), + FxAgentBase(false), fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))), fx.Invoke(verifyFn))...) 
defer app.RequireStart().RequireStop() @@ -201,7 +201,7 @@ func TestOneShot(t *testing.T, fct func()) { require.NoError(t, fx.ValidateApp( append(opts, - FxAgentBase(), + FxAgentBase(false), fx.Invoke(oneShotFunc))...)) return nil } @@ -234,7 +234,7 @@ func TestBundle(t *testing.T, bundle BundleOptions, extraOptions ...fx.Option) { invoke, bundle, fx.Options(extraOptions...), - FxAgentBase(), + FxAgentBase(false), fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))), )) } From 01d4ab5beeac9792c1206e857ebc3385cbfe5858 Mon Sep 17 00:00:00 2001 From: Daniel Lavie Date: Wed, 4 Sep 2024 19:19:50 +0300 Subject: [PATCH 006/128] USMON-1008: Parse Produce Response (#28526) --- .../ebpf/c/protocols/classification/defs.h | 2 + .../c/protocols/kafka/kafka-classification.h | 7 +- .../ebpf/c/protocols/kafka/kafka-parsing.h | 332 ++++++++++++++---- pkg/network/ebpf/c/protocols/kafka/types.h | 8 + pkg/network/protocols/ebpf_types.go | 4 + pkg/network/protocols/ebpf_types_linux.go | 16 +- .../protocols/kafka/kernel_telemetry.go | 11 +- pkg/network/protocols/kafka/model_linux.go | 7 +- pkg/network/protocols/kafka/protocol.go | 48 ++- pkg/network/protocols/kafka/statkeeper.go | 2 +- pkg/network/protocols/kafka/types_linux.go | 3 +- pkg/network/usm/kafka_monitor_test.go | 219 ++++++++---- 12 files changed, 498 insertions(+), 161 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index 5148d5a5be43b..d44d2adb8d8ce 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -147,6 +147,8 @@ typedef enum { PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12, PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0, PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12, + PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0, + PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9, PROG_KAFKA_TERMINATION, PROG_GRPC, PROG_POSTGRES, diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h b/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h index 68140f05390c4..982cdcaed53bc 100644 --- a/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h +++ b/pkg/network/ebpf/c/protocols/kafka/kafka-classification.h @@ -283,7 +283,7 @@ static __always_inline bool skip_request_tagged_fields(pktbuf_t pkt, u32 *offset } // Getting the offset (out parameter) of the first topic name in the produce request. -static __always_inline bool get_topic_offset_from_produce_request(const kafka_header_t *kafka_header, pktbuf_t pkt, u32 *out_offset) { +static __always_inline bool get_topic_offset_from_produce_request(const kafka_header_t *kafka_header, pktbuf_t pkt, u32 *out_offset, s16 *out_acks) { const s16 api_version = kafka_header->api_version; u32 offset = *out_offset; bool flexible = api_version >= 9; @@ -310,6 +310,9 @@ static __always_inline bool get_topic_offset_from_produce_request(const kafka_he // complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR. 
return false; } + if (out_acks != NULL) { + *out_acks = acks; + } PKTBUF_READ_BIG_ENDIAN_WRAPPER(s32, timeout_ms, pkt, offset); if (timeout_ms < 0) { @@ -361,7 +364,7 @@ static __always_inline bool is_kafka_request(const kafka_header_t *kafka_header, bool flexible = false; switch (kafka_header->api_key) { case KAFKA_PRODUCE: - if (!get_topic_offset_from_produce_request(kafka_header, pkt, &offset)) { + if (!get_topic_offset_from_produce_request(kafka_header, pkt, &offset, NULL)) { return false; } flexible = kafka_header->api_version >= 9; diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h index 5ee3697c4aa9a..f20905e92e041 100644 --- a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h +++ b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h @@ -480,6 +480,13 @@ static enum parser_level parser_state_to_level(kafka_response_state state) case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START: case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS: case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START: + + case KAFKA_PRODUCE_RESPONSE_START: + case KAFKA_PRODUCE_RESPONSE_NUM_TOPICS: + case KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE: + case KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS: + case KAFKA_PRODUCE_RESPONSE_PARTITION_START: + case KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START: return PARSER_LEVEL_PARTITION; case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START: case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH: @@ -494,13 +501,14 @@ static enum parser_level parser_state_to_level(kafka_response_state state) } } -static __always_inline enum parse_result kafka_continue_parse_response_partition_loop(kafka_info_t *kafka, +static __always_inline enum parse_result kafka_continue_parse_response_partition_loop_fetch(kafka_info_t *kafka, conn_tuple_t *tup, kafka_response_context_t *response, pktbuf_t pkt, u32 offset, u32 data_end, u32 api_version) { + extra_debug("Parsing fetch response"); u32 orig_offset = offset; bool flexible = api_version >= 12; enum parse_result ret; @@ -587,18 +595,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition response->record_batch_length = 0; } break; - case KAFKA_FETCH_RESPONSE_PARTITION_START: - case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START: - case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS: - case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_MAGIC: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_RECORDS_COUNT: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_END: - case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END: - case KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS: - case KAFKA_FETCH_RESPONSE_PARTITION_END: + default: break; } @@ -754,12 +751,8 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition response->state = KAFKA_FETCH_RESPONSE_PARTITION_START; break; - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_START: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_LENGTH: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_MAGIC: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_RECORDS_COUNT: - case KAFKA_FETCH_RESPONSE_RECORD_BATCH_END: - case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END: + default: + extra_debug("invalid state %d in partition parser", response->state); return RET_ERR; break; @@ -776,6 +769,133 @@ static __always_inline enum parse_result kafka_continue_parse_response_partition return RET_LOOP_END; } +static __always_inline enum 
parse_result kafka_continue_parse_response_partition_loop_produce(kafka_info_t *kafka, + conn_tuple_t *tup, + kafka_response_context_t *response, + pktbuf_t pkt, u32 offset, + u32 data_end, + u32 api_version) +{ + extra_debug("Parsing produce response"); + u32 orig_offset = offset; + bool flexible = api_version >= 9; + enum parse_result ret; + + extra_debug("carry_over_offset %d", response->carry_over_offset); + + if (response->carry_over_offset < 0) { + return RET_ERR; + } + + offset += response->carry_over_offset; + response->carry_over_offset = 0; + + switch (response->state) { + case KAFKA_PRODUCE_RESPONSE_START: + extra_debug("KAFKA_PRODUCE_RESPONSE_START"); + if (flexible) { + ret = skip_tagged_fields(response, pkt, &offset, data_end, true); + if (ret != RET_DONE) { + return ret; + } + } + + response->state = KAFKA_PRODUCE_RESPONSE_NUM_TOPICS; + // fallthrough + + case KAFKA_PRODUCE_RESPONSE_NUM_TOPICS: + { + extra_debug("KAFKA_PRODUCE_RESPONSE_NUM_TOPICS"); + s64 num_topics = 0; + ret = read_varint_or_s32(flexible, response, pkt, &offset, data_end, &num_topics, true, + VARINT_BYTES_NUM_TOPICS); + extra_debug("num_topics: %lld", num_topics); + if (ret != RET_DONE) { + return ret; + } + if (num_topics <= 0) { + return RET_ERR; + } + } + response->state = KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE; + // fallthrough + + case KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE: + { + extra_debug("KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE"); + s64 topic_name_size = 0; + ret = read_varint_or_s16(flexible, response, pkt, &offset, data_end, &topic_name_size, true, + VARINT_BYTES_TOPIC_NAME_SIZE); + extra_debug("topic_name_size: %lld", topic_name_size); + if (ret != RET_DONE) { + return ret; + } + if (topic_name_size <= 0 || topic_name_size > TOPIC_NAME_MAX_ALLOWED_SIZE) { + return RET_ERR; + } + offset += topic_name_size; + } + response->state = KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS; + // fallthrough + + case KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS: + { + extra_debug("KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS"); + s64 number_of_partitions = 0; + ret = read_varint_or_s32(flexible, response, pkt, &offset, data_end, &number_of_partitions, true, + VARINT_BYTES_NUM_PARTITIONS); + extra_debug("number_of_partitions: %lld", number_of_partitions); + if (ret != RET_DONE) { + return ret; + } + if (number_of_partitions <= 0 || number_of_partitions >= 2) { + // We only support a single partition for produce requests at the moment + return RET_ERR; + } + response->partitions_count = number_of_partitions; + response->state = KAFKA_PRODUCE_RESPONSE_PARTITION_START; + + } + break; + + default: + break; + } + + switch (response->state) { + case KAFKA_PRODUCE_RESPONSE_PARTITION_START: + offset += sizeof(s32); // Skip partition_index + response->state = KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START; + // fallthrough + + case KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START: + { + // Error codes range from -1 to 119 as per the Kafka protocol specification. 
+ // For details, refer to: https://kafka.apache.org/protocol.html#protocol_error_codes + s16 error_code = 0; + ret = read_with_remainder_s16(response, pkt, &offset, data_end, &error_code, true); + if (ret != RET_DONE) { + return ret; + } + if (error_code < -1 || error_code > 119) { + extra_debug("invalid error code: %d", error_code); + return RET_ERR; + } + extra_debug("got error code: %d", error_code); + response->partition_error_code = error_code; + response->transaction.error_code = error_code; + + // No need to continue parsing the produce response, as we got the error now + return RET_DONE; + } + default: + break; + } + + response->carry_over_offset = offset - orig_offset; + return RET_LOOP_END; +} + static __always_inline enum parse_result kafka_continue_parse_response_record_batches_loop(kafka_info_t *kafka, conn_tuple_t *tup, kafka_response_context_t *response, @@ -966,16 +1086,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_record_ba } break; - case KAFKA_FETCH_RESPONSE_START: - case KAFKA_FETCH_RESPONSE_NUM_TOPICS: - case KAFKA_FETCH_RESPONSE_TOPIC_NAME_SIZE: - case KAFKA_FETCH_RESPONSE_NUM_PARTITIONS: - case KAFKA_FETCH_RESPONSE_PARTITION_START: - case KAFKA_FETCH_RESPONSE_PARTITION_ERROR_CODE_START: - case KAFKA_FETCH_RESPONSE_PARTITION_ABORTED_TRANSACTIONS: - case KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_START: - case KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS: - case KAFKA_FETCH_RESPONSE_PARTITION_END: + default: extra_debug("invalid state %d in record batches array parser", response->state); break; } @@ -991,7 +1102,7 @@ static __always_inline enum parse_result kafka_continue_parse_response_record_ba return RET_LOOP_END; } -static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *tup, pktbuf_t pkt, kafka_response_state state, u32 api_version) +static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t *tup, pktbuf_t pkt, kafka_response_state state, u32 api_version, u32 api_key) { enum parser_level level = parser_state_to_level(state); // Leave uninitialzed to get a compile-time warning if we miss setting it in @@ -1010,12 +1121,25 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * break; case PARSER_LEVEL_PARTITION: default: - if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; - } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + switch (api_key) { + case KAFKA_FETCH: + if (api_version >= 12) { + index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + } else { + index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + } + break; + case KAFKA_PRODUCE: + if (api_version >= 9) { + index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9; + } else { + index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0; + } + break; + default: + // Shouldn't happen + return; } - break; } bpf_tail_call_compat(ctx, &protocols_progs, index); break; @@ -1030,12 +1154,25 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * break; case PARSER_LEVEL_PARTITION: default: - if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; - } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + switch (api_key) { + case KAFKA_FETCH: + if (api_version >= 12) { + index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + } else { + index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + } + break; + case KAFKA_PRODUCE: + if (api_version >= 9) { + index = PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9; + } else { + index = 
PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0; + } + break; + default: + // Shouldn't happen + return; } - break; } bpf_tail_call_compat(ctx, &tls_process_progs, index); break; @@ -1053,15 +1190,20 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx pktbuf_t pkt, u32 offset, u32 data_end, enum parser_level level, - u32 api_version) + u32 api_version, + u32 api_key) { - enum parse_result ret; + enum parse_result ret = 0; if (level == PARSER_LEVEL_PARTITION) { response->record_batches_arrays_count = 0; response->record_batches_arrays_idx = 0; - ret = kafka_continue_parse_response_partition_loop(kafka, tup, response, pkt, offset, data_end, api_version); + if (api_key == KAFKA_PRODUCE) { + ret = kafka_continue_parse_response_partition_loop_produce(kafka, tup, response, pkt, offset, data_end, api_version); + } else if (api_key == KAFKA_FETCH) { + ret = kafka_continue_parse_response_partition_loop_fetch(kafka, tup, response, pkt, offset, data_end, api_version); + } extra_debug("partition loop ret %d record_batches_array_count %u partitions_count %u", ret, response->record_batches_arrays_count, response->partitions_count); // If we have parsed any record batches arrays (message sets), then @@ -1083,7 +1225,7 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx } if (ret == RET_DONE) { - extra_debug("enqueue, records_count %d", response->transaction.records_count); + extra_debug("enqueue, records_count %d, error_code %d", response->transaction.records_count, response->transaction.error_code); kafka_batch_enqueue_wrapper(kafka, tup, &response->transaction); return ret; } @@ -1194,26 +1336,33 @@ static __always_inline enum parse_result kafka_continue_parse_response(void *ctx } static __always_inline void kafka_response_parser(kafka_info_t *kafka, void *ctx, conn_tuple_t *tup, pktbuf_t pkt, -enum parser_level level, u32 min_api_version, u32 max_api_version) { +enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) { kafka_response_context_t *response = bpf_map_lookup_elem(&kafka_response, tup); if (!response) { return; } u32 api_version = response->transaction.request_api_version; + u32 api_key = response->transaction.request_api_key; + if (api_version < min_api_version || api_version > max_api_version) { // Should never happen. This check is there to inform the compiler about // the bounds of api_version so that it can optimize away branches for versions // outside the range at compile time. return; } + if (api_key != target_api_key) { + // Should never happen. This check is there to inform the compiler about + // the target_api_key so that it can optimize away branches for other keys + return; + } u32 data_off = pktbuf_data_offset(pkt); u32 data_end = pktbuf_data_end(pkt); enum parse_result result = kafka_continue_parse_response(ctx, kafka, tup, response, pkt, data_off, data_end, level, - api_version); + api_version, target_api_key); switch (result) { case RET_EOP: // This packet parsed successfully but more data needed, nothing @@ -1231,7 +1380,7 @@ enum parser_level level, u32 min_api_version, u32 max_api_version) { case RET_LOOP_END: // We ran out of iterations in the loop, but we're not done // processing this packet, so continue in a self tail call. 
- kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version); + kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version, response->transaction.request_api_key); // If we failed (due to exceeding tail calls), at least flush what // we have. @@ -1243,7 +1392,7 @@ enum parser_level level, u32 min_api_version, u32 max_api_version) { } } -static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb, enum parser_level level, u32 min_api_version, u32 max_api_version) { +static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb, enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) { const __u32 zero = 0; kafka_info_t *kafka = bpf_map_lookup_elem(&kafka_heap, &zero); if (kafka == NULL) { @@ -1256,32 +1405,43 @@ static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb return 0; } - kafka_response_parser(kafka, skb, &tup, pktbuf_from_skb(skb, &skb_info), level, min_api_version, max_api_version); + kafka_response_parser(kafka, skb, &tup, pktbuf_from_skb(skb, &skb_info), level, min_api_version, max_api_version, target_api_key); return 0; } SEC("socket/kafka_response_partition_parser_v0") int socket__kafka_response_partition_parser_v0(struct __sk_buff *skb) { - return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11); + return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } SEC("socket/kafka_response_partition_parser_v12") int socket__kafka_response_partition_parser_v12(struct __sk_buff *skb) { - return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12); + return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH); } SEC("socket/kafka_response_record_batch_parser_v0") int socket__kafka_response_record_batch_parser_v0(struct __sk_buff *skb) { - return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11); + return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } SEC("socket/kafka_response_record_batch_parser_v12") int socket__kafka_response_record_batch_parser_v12(struct __sk_buff *skb) { - return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12); + return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); +} + +SEC("socket/kafka_produce_response_partition_parser_v0") +int socket__kafka_produce_response_partition_parser_v0(struct __sk_buff *skb) { + return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 8, KAFKA_PRODUCE); } -static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *ctx, enum parser_level level, u32 min_api_version, u32 max_api_version) { +SEC("socket/kafka_produce_response_partition_parser_v9") +int socket__kafka_produce_response_partition_parser_v9(struct __sk_buff *skb) { + return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 9, 11, KAFKA_PRODUCE); +} + + +static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *ctx, enum parser_level level, u32 min_api_version, u32 max_api_version, u32 target_api_key) { const __u32 zero = 0; kafka_info_t *kafka = bpf_map_lookup_elem(&kafka_heap, &zero); if (kafka == NULL) { @@ -1295,29 +1455,39 @@ static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *c // Put tuple on stack for 4.14. 
conn_tuple_t tup = args->tup; - kafka_response_parser(kafka, ctx, &tup, pktbuf_from_tls(ctx, args), level, min_api_version, max_api_version); + kafka_response_parser(kafka, ctx, &tup, pktbuf_from_tls(ctx, args), level, min_api_version, max_api_version, target_api_key); return 0; } SEC("uprobe/kafka_tls_response_partition_parser_v0") int uprobe__kafka_tls_response_partition_parser_v0(struct pt_regs *ctx) { - return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11); + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } SEC("uprobe/kafka_tls_response_partition_parser_v12") int uprobe__kafka_tls_response_partition_parser_v12(struct pt_regs *ctx) { - return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 12, 12); + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH); } SEC("uprobe/kafka_tls_response_record_batch_parser_v0") int uprobe__kafka_tls_response_record_batch_parser_v0(struct pt_regs *ctx) { - return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11); + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } SEC("uprobe/kafka_tls_response_record_batch_parser_v12") int uprobe__kafka_tls_response_record_batch_parser_v12(struct pt_regs *ctx) { - return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12); + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); +} + +SEC("uprobe/kafka_tls_produce_response_partition_parser_v0") +int uprobe__kafka_tls_produce_response_partition_parser_v0(struct pt_regs *ctx) { + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 8, KAFKA_PRODUCE); +} + +SEC("uprobe/kafka_tls_produce_response_partition_parser_v9") +int uprobe__kafka_tls_produce_response_partition_parser_v9(struct pt_regs *ctx) { + return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 9, 11, KAFKA_PRODUCE); } // Gets the next expected TCP sequence in the stream, assuming @@ -1386,7 +1556,13 @@ static __always_inline bool kafka_process_new_response(void *ctx, conn_tuple_t * kafka->response.transaction = *request; bpf_map_delete_elem(&kafka_in_flight, &key); - kafka->response.state = KAFKA_FETCH_RESPONSE_START; + if (request->request_api_key == KAFKA_FETCH) { + kafka->response.state = KAFKA_FETCH_RESPONSE_START; + } else if (request->request_api_key == KAFKA_PRODUCE) { + kafka->response.state = KAFKA_PRODUCE_RESPONSE_START; + } else { + return false; + } kafka->response.carry_over_offset = offset - orig_offset; kafka->response.expected_tcp_seq = kafka_get_next_tcp_seq(skb_info); kafka->response.transaction.response_last_seen = bpf_ktime_get_ns(); @@ -1397,7 +1573,7 @@ static __always_inline bool kafka_process_new_response(void *ctx, conn_tuple_t * bpf_map_update_elem(&kafka_response, tup, &response_ctx, BPF_ANY); - kafka_call_response_parser(ctx, tup, pkt, KAFKA_FETCH_RESPONSE_START, kafka->response.transaction.request_api_version); + kafka_call_response_parser(ctx, tup, pkt, KAFKA_FETCH_RESPONSE_START, kafka->response.transaction.request_api_version, kafka->response.transaction.request_api_key); return true; } @@ -1407,7 +1583,7 @@ static __always_inline bool kafka_process_response(void *ctx, conn_tuple_t *tup, response->transaction.response_last_seen = bpf_ktime_get_ns(); if (!skb_info || skb_info->tcp_seq == response->expected_tcp_seq) { response->expected_tcp_seq = kafka_get_next_tcp_seq(skb_info); - 
kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version); + kafka_call_response_parser(ctx, tup, pkt, response->state, response->transaction.request_api_version, response->transaction.request_api_key); // It's on the response path, so no need to parser as a request. return true; } @@ -1491,11 +1667,15 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka bool flexible = false; + s16 produce_required_acks = 0; switch (kafka_header.api_key) { case KAFKA_PRODUCE: - if (!get_topic_offset_from_produce_request(&kafka_header, pkt, &offset)) { + if (!get_topic_offset_from_produce_request(&kafka_header, pkt, &offset, &produce_required_acks)) { return false; } + if (produce_required_acks == 0) { + __sync_fetch_and_add(&kafka_tel->produce_no_required_acks, 1); + } flexible = kafka_header.api_version >= 9; break; case KAFKA_FETCH: @@ -1592,6 +1772,7 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka log_debug("Got number of Kafka produce records <= 0"); return false; } + // We now know the record count, but we'll have to wait for the response to obtain the error code and latency kafka_transaction->records_count = records_count; break; } @@ -1603,21 +1784,22 @@ static __always_inline bool kafka_process(conn_tuple_t *tup, kafka_info_t *kafka return false; } - if (kafka_header.api_key == KAFKA_FETCH) { - // Copy to stack required by 4.14 verifier. - kafka_transaction_t transaction; - kafka_transaction_key_t key; - bpf_memset(&key, 0, sizeof(key)); - bpf_memcpy(&transaction, kafka_transaction, sizeof(transaction)); - key.correlation_id = kafka_header.correlation_id; - bpf_memcpy(&key.tuple, tup, sizeof(key.tuple)); - // Flip the tuple for the response path. - flip_tuple(&key.tuple); - bpf_map_update_elem(&kafka_in_flight, &key, &transaction, BPF_NOEXIST); + if (kafka_header.api_key == KAFKA_PRODUCE && produce_required_acks == 0) { + // If we have a produce request with required acks set to 0, we can enqueue it immediately, as there will be no produce response. + kafka_batch_enqueue_wrapper(kafka, tup, kafka_transaction); return true; } - kafka_batch_enqueue_wrapper(kafka, tup, kafka_transaction); + // Copy to stack required by 4.14 verifier. + kafka_transaction_t transaction; + kafka_transaction_key_t key; + bpf_memset(&key, 0, sizeof(key)); + bpf_memcpy(&transaction, kafka_transaction, sizeof(transaction)); + key.correlation_id = kafka_header.correlation_id; + bpf_memcpy(&key.tuple, tup, sizeof(key.tuple)); + // Flip the tuple for the response path. 
+ flip_tuple(&key.tuple); + bpf_map_update_elem(&kafka_in_flight, &key, &transaction, BPF_NOEXIST); return true; } diff --git a/pkg/network/ebpf/c/protocols/kafka/types.h b/pkg/network/ebpf/c/protocols/kafka/types.h index a1729180174d8..af3a9e82de9c1 100644 --- a/pkg/network/ebpf/c/protocols/kafka/types.h +++ b/pkg/network/ebpf/c/protocols/kafka/types.h @@ -60,6 +60,13 @@ typedef enum { KAFKA_FETCH_RESPONSE_RECORD_BATCHES_ARRAY_END, KAFKA_FETCH_RESPONSE_PARTITION_TAGGED_FIELDS, KAFKA_FETCH_RESPONSE_PARTITION_END, + + KAFKA_PRODUCE_RESPONSE_START, + KAFKA_PRODUCE_RESPONSE_NUM_TOPICS, + KAFKA_PRODUCE_RESPONSE_TOPIC_NAME_SIZE, + KAFKA_PRODUCE_RESPONSE_NUM_PARTITIONS, + KAFKA_PRODUCE_RESPONSE_PARTITION_START, + KAFKA_PRODUCE_RESPONSE_PARTITION_ERROR_CODE_START, } __attribute__ ((packed)) kafka_response_state; typedef struct kafka_fetch_response_record_batches_array_t { @@ -113,6 +120,7 @@ typedef struct kafka_info_t { typedef struct { // The array topic_name_size_buckets maps a bucket index to the number of occurrences observed for topic name lengths __u64 topic_name_size_buckets[KAFKA_TELEMETRY_TOPIC_NAME_NUM_OF_BUCKETS]; + __u64 produce_no_required_acks; } kafka_telemetry_t; #endif diff --git a/pkg/network/protocols/ebpf_types.go b/pkg/network/protocols/ebpf_types.go index 64ab3ae132ba7..ab293e741290b 100644 --- a/pkg/network/protocols/ebpf_types.go +++ b/pkg/network/protocols/ebpf_types.go @@ -55,6 +55,10 @@ const ( ProgramKafkaResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0 // ProgramKafkaResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum ProgramKafkaResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12 + // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + ProgramKafkaProduceResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 + // ProgramKafkaProduceResponsePartitionParserV9 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + ProgramKafkaProduceResponsePartitionParserV9 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 // ProgramKafkaTermination is tail call to process Kafka termination. 
ProgramKafkaTermination ProgramType = C.PROG_KAFKA_TERMINATION // ProgramPostgres is the Golang representation of the C.PROG_POSTGRES enum diff --git a/pkg/network/protocols/ebpf_types_linux.go b/pkg/network/protocols/ebpf_types_linux.go index 43ab21bee8667..24e84cd1b3511 100644 --- a/pkg/network/protocols/ebpf_types_linux.go +++ b/pkg/network/protocols/ebpf_types_linux.go @@ -44,17 +44,21 @@ const ( ProgramKafkaResponseRecordBatchParserV12 ProgramType = 0xd - ProgramKafkaTermination ProgramType = 0xe + ProgramKafkaProduceResponsePartitionParserV0 ProgramType = 0xe - ProgramPostgres ProgramType = 0x10 + ProgramKafkaProduceResponsePartitionParserV9 ProgramType = 0xf - ProgramPostgresParseMessage ProgramType = 0x11 + ProgramKafkaTermination ProgramType = 0x10 - ProgramPostgresTermination ProgramType = 0x12 + ProgramPostgres ProgramType = 0x12 - ProgramRedis ProgramType = 0x13 + ProgramPostgresParseMessage ProgramType = 0x13 - ProgramRedisTermination ProgramType = 0x14 + ProgramPostgresTermination ProgramType = 0x14 + + ProgramRedis ProgramType = 0x15 + + ProgramRedisTermination ProgramType = 0x16 ) type ebpfProtocolType uint16 diff --git a/pkg/network/protocols/kafka/kernel_telemetry.go b/pkg/network/protocols/kafka/kernel_telemetry.go index 034cd2a6cb59a..d607bb0032d51 100644 --- a/pkg/network/protocols/kafka/kernel_telemetry.go +++ b/pkg/network/protocols/kafka/kernel_telemetry.go @@ -20,6 +20,9 @@ type kernelTelemetry struct { // pathSizeBucket Count of topic names sizes divided into buckets. pathSizeBucket [TopicNameBuckets]*libtelemetry.Counter + // produceNoRequiredAcks is the number of produce requests that did not require any acks. + produceNoRequiredAcks *libtelemetry.Counter + // telemetryLastState represents the latest Kafka eBPF Kernel telemetry observed from the kernel telemetryLastState RawKernelTelemetry } @@ -34,6 +37,8 @@ func newKernelTelemetry() *kernelTelemetry { kafkaKernelTel.pathSizeBucket[bucketIndex] = metricGroup.NewCounter("path_size_bucket_" + (strconv.Itoa(bucketIndex + 1))) } + kafkaKernelTel.produceNoRequiredAcks = metricGroup.NewCounter("produce_no_required_acks") + return kafkaKernelTel } @@ -42,8 +47,9 @@ func (t *kernelTelemetry) update(tel *RawKernelTelemetry) { // We should only add the delta between the current eBPF map state and the last seen eBPF map state telemetryDelta := tel.Sub(t.telemetryLastState) for bucketIndex := range t.pathSizeBucket { - t.pathSizeBucket[bucketIndex].Add(int64(telemetryDelta.Name_size_buckets[bucketIndex])) + t.pathSizeBucket[bucketIndex].Add(int64(telemetryDelta.Topic_name_size_buckets[bucketIndex])) } + t.produceNoRequiredAcks.Add(int64(telemetryDelta.Produce_no_required_acks)) // Create a deep copy of the 'tel' parameter to prevent changes from the outer scope affecting the last state t.telemetryLastState = *tel } @@ -51,7 +57,8 @@ func (t *kernelTelemetry) update(tel *RawKernelTelemetry) { // Sub generates a new RawKernelTelemetry object by subtracting the values of this RawKernelTelemetry object from the other func (t *RawKernelTelemetry) Sub(other RawKernelTelemetry) *RawKernelTelemetry { return &RawKernelTelemetry{ - Name_size_buckets: computePathSizeBucketDifferences(t.Name_size_buckets, other.Name_size_buckets), + Topic_name_size_buckets: computePathSizeBucketDifferences(t.Topic_name_size_buckets, other.Topic_name_size_buckets), + Produce_no_required_acks: t.Produce_no_required_acks - other.Produce_no_required_acks, } } diff --git a/pkg/network/protocols/kafka/model_linux.go 
b/pkg/network/protocols/kafka/model_linux.go index 25b7c738469c4..225e2120acc19 100644 --- a/pkg/network/protocols/kafka/model_linux.go +++ b/pkg/network/protocols/kafka/model_linux.go @@ -70,7 +70,8 @@ RawKernelTelemetry{ "in range [81, 90]": %d, "in range [91, 255]": %d, } -}`, t.Name_size_buckets[0], t.Name_size_buckets[1], t.Name_size_buckets[2], t.Name_size_buckets[3], - t.Name_size_buckets[4], t.Name_size_buckets[5], t.Name_size_buckets[6], t.Name_size_buckets[7], - t.Name_size_buckets[8], t.Name_size_buckets[9]) + "produce no required acks": %d, +}`, t.Topic_name_size_buckets[0], t.Topic_name_size_buckets[1], t.Topic_name_size_buckets[2], t.Topic_name_size_buckets[3], + t.Topic_name_size_buckets[4], t.Topic_name_size_buckets[5], t.Topic_name_size_buckets[6], t.Topic_name_size_buckets[7], + t.Topic_name_size_buckets[8], t.Topic_name_size_buckets[9], t.Produce_no_required_acks) } diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go index 85e94c778004d..542d97367e9ce 100644 --- a/pkg/network/protocols/kafka/protocol.go +++ b/pkg/network/protocols/kafka/protocol.go @@ -40,10 +40,12 @@ const ( eventStreamName = "kafka" filterTailCall = "socket__kafka_filter" - responsePartitionParserV0TailCall = "socket__kafka_response_partition_parser_v0" - responsePartitionParserV12TailCall = "socket__kafka_response_partition_parser_v12" - responseRecordBatchParserV0TailCall = "socket__kafka_response_record_batch_parser_v0" - responseRecordBatchParserV12TailCall = "socket__kafka_response_record_batch_parser_v12" + responsePartitionParserV0TailCall = "socket__kafka_response_partition_parser_v0" + responsePartitionParserV12TailCall = "socket__kafka_response_partition_parser_v12" + responseRecordBatchParserV0TailCall = "socket__kafka_response_record_batch_parser_v0" + responseRecordBatchParserV12TailCall = "socket__kafka_response_record_batch_parser_v12" + ProduceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0" + ProduceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9" dispatcherTailCall = "socket__protocol_dispatcher_kafka" kafkaHeapMap = "kafka_heap" @@ -53,10 +55,12 @@ const ( tlsFilterTailCall = "uprobe__kafka_tls_filter" - tlsResponsePartitionParserV0TailCall = "uprobe__kafka_tls_response_partition_parser_v0" - tlsResponsePartitionParserV12TailCall = "uprobe__kafka_tls_response_partition_parser_v12" - tlsResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_response_record_batch_parser_v0" - tlsResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_response_record_batch_parser_v12" + tlsResponsePartitionParserV0TailCall = "uprobe__kafka_tls_response_partition_parser_v0" + tlsResponsePartitionParserV12TailCall = "uprobe__kafka_tls_response_partition_parser_v12" + tlsResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_response_record_batch_parser_v0" + tlsResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_response_record_batch_parser_v12" + tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0" + tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9" tlsTerminationTailCall = "uprobe__kafka_tls_termination" tlsDispatcherTailCall = "uprobe__tls_protocol_dispatcher_kafka" @@ -132,6 +136,20 @@ var Spec = &protocols.ProtocolSpec{ EBPFFuncName: responseRecordBatchParserV12TailCall, }, }, + { + ProgArrayName: protocols.ProtocolDispatcherProgramsMap, + Key: 
uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: ProduceResponsePartitionParserV0TailCall, + }, + }, + { + ProgArrayName: protocols.ProtocolDispatcherProgramsMap, + Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: ProduceResponsePartitionParserV9TailCall, + }, + }, { ProgArrayName: protocols.ProtocolDispatcherClassificationPrograms, Key: uint32(protocols.DispatcherKafkaProg), @@ -174,6 +192,20 @@ var Spec = &protocols.ProtocolSpec{ EBPFFuncName: tlsResponseRecordBatchParserV12TailCall, }, }, + { + ProgArrayName: protocols.TLSDispatcherProgramsMap, + Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: tlsProduceResponsePartitionParserV0TailCall, + }, + }, + { + ProgArrayName: protocols.TLSDispatcherProgramsMap, + Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: tlsProduceResponsePartitionParserV9TailCall, + }, + }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, Key: uint32(protocols.ProgramKafkaTermination), diff --git a/pkg/network/protocols/kafka/statkeeper.go b/pkg/network/protocols/kafka/statkeeper.go index ccd7d0e6416c4..3ef8895d4c201 100644 --- a/pkg/network/protocols/kafka/statkeeper.go +++ b/pkg/network/protocols/kafka/statkeeper.go @@ -58,7 +58,7 @@ func (statKeeper *StatKeeper) Process(tx *EbpfTx) { } latency := tx.RequestLatency() - // Currently, we only support measuring latency for fetch operations + // Produce requests with acks = 0 do not receive a response, and as a result, have no latency if key.RequestAPIKey == FetchAPIKey && latency <= 0 { statKeeper.telemetry.invalidLatency.Add(1) return diff --git a/pkg/network/protocols/kafka/types_linux.go b/pkg/network/protocols/kafka/types_linux.go index cf33022e3dd2f..ca225a50ec3a5 100644 --- a/pkg/network/protocols/kafka/types_linux.go +++ b/pkg/network/protocols/kafka/types_linux.go @@ -63,5 +63,6 @@ type KafkaResponseContext struct { } type RawKernelTelemetry struct { - Name_size_buckets [10]uint64 + Topic_name_size_buckets [10]uint64 + Produce_no_required_acks uint64 } diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index 2862a8600c89b..bbcedaecb2be6 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -94,6 +94,11 @@ type kafkaParsingValidationWithErrorCodes struct { expectedAPIVersionFetch int } +type groupInfo struct { + numSets int + msgs []Message +} + func skipTestIfKernelNotSupported(t *testing.T) { currKernelVersion, err := kernel.HostVersion() require.NoError(t, err) @@ -500,15 +505,15 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b // Ensure that the other buckets remain unchanged before verifying the expected bucket. for idx := 0; idx < kafka.TopicNameBuckets; idx++ { if idx != tt.expectedBucketIndex { - require.Equal(t, currentRawKernelTelemetry.Name_size_buckets[idx], - telemetryMap.Name_size_buckets[idx], + require.Equal(t, currentRawKernelTelemetry.Topic_name_size_buckets[idx], + telemetryMap.Topic_name_size_buckets[idx], "Expected bucket (%d) to remain unchanged", idx) } } // Verify that the expected bucket contains the correct number of occurrences. 
expectedNumberOfOccurrences := fixCount(2) // (1 produce request + 1 fetch request) - return uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Name_size_buckets[tt.expectedBucketIndex] == telemetryMap.Name_size_buckets[tt.expectedBucketIndex] + return uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Topic_name_size_buckets[tt.expectedBucketIndex] == telemetryMap.Topic_name_size_buckets[tt.expectedBucketIndex] }, time.Second*3, time.Millisecond*100) // Update the current raw kernel telemetry for the next iteration @@ -616,7 +621,7 @@ func appendUint32(dst []byte, u uint32) []byte { // kmsg doesn't have a ResponseFormatter so we need to add the length // and the correlation Id ourselves. -func appendResponse(dst []byte, response kmsg.FetchResponse, correlationID uint32) []byte { +func appendResponse(dst []byte, response kmsg.Response, correlationID uint32) []byte { var data []byte data = response.AppendTo(data) @@ -645,9 +650,9 @@ type Message struct { response []byte } -func appendMessages(messages []Message, correlationID int, req kmsg.FetchRequest, resp kmsg.FetchResponse) []Message { +func appendMessages(messages []Message, correlationID int, req kmsg.Request, resp kmsg.Response) []Message { formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) - data := formatter.AppendRequest(make([]byte, 0), &req, int32(correlationID)) + data := formatter.AppendRequest(make([]byte, 0), req, int32(correlationID)) respData := appendResponse(make([]byte, 0), resp, uint32(correlationID)) return append(messages, @@ -932,7 +937,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) var msgs []Message reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55)) - respData := appendResponse(make([]byte, 0), resp, uint32(55)) + respData := appendResponse(make([]byte, 0), &resp, uint32(55)) msgs = append(msgs, Message{request: reqData}) msgs = append(msgs, Message{response: respData[0:4]}) @@ -955,7 +960,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) var msgs []Message reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55)) - respData := appendResponse(make([]byte, 0), resp, uint32(55)) + respData := appendResponse(make([]byte, 0), &resp, uint32(55)) msgs = append(msgs, Message{request: reqData}) msgs = append(msgs, Message{response: respData[0:8]}) @@ -978,7 +983,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) var msgs []Message reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(55)) - respData := appendResponse(make([]byte, 0), resp, uint32(55)) + respData := appendResponse(make([]byte, 0), &resp, uint32(55)) msgs = append(msgs, Message{request: reqData}) msgs = append(msgs, Message{response: respData[0:4]}) @@ -1150,7 +1155,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { var msgs []Message if tt.buildMessages == nil { - msgs = appendMessages(msgs, 99, req, resp) + msgs = appendMessages(msgs, 99, &req, &resp) } else { msgs = tt.buildMessages(req, resp) } @@ -1193,54 +1198,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) - type groupInfo struct { - numSets int - msgs []Message - } - - var groups []groupInfo - var info groupInfo - - for splitIdx := 0; splitIdx < 500; 
splitIdx++ { - reqData := formatter.AppendRequest(make([]byte, 0), &req, int32(splitIdx)) - respData := appendResponse(make([]byte, 0), resp, uint32(splitIdx)) - - // There is an assumption in the code that there are no splits - // inside the header. - minSegSize := 8 - - segSize := min(minSegSize+splitIdx, len(respData)) - if segSize >= len(respData) { - break - } - - var msgs []Message - msgs = append(msgs, Message{request: reqData}) - msgs = append(msgs, Message{response: respData[0:segSize]}) - - if segSize+8 >= len(respData) { - msgs = append(msgs, Message{response: respData[segSize:]}) - } else { - // Three segments tests other code paths than two, for example - // it will fail if the tcp_seq is not updated in the response - // parsing continuation path. - msgs = append(msgs, Message{response: respData[segSize : segSize+8]}) - msgs = append(msgs, Message{response: respData[segSize+8:]}) - } - - if info.numSets >= 20 { - groups = append(groups, info) - info.numSets = 0 - info.msgs = make([]Message, 0) - } - - info.numSets++ - info.msgs = append(info.msgs, msgs...) - } - - if info.numSets > 0 { - groups = append(groups, info) - } + groups := getSplitGroups(&req, &resp, formatter) for groupIdx, group := range groups { name := fmt.Sprintf("split/%s/group%d", tt.name, groupIdx) @@ -1303,7 +1261,9 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { name string topic string buildRequest func(string) kmsg.ProduceRequest + buildResponse func(string) kmsg.ProduceResponse numProducedRecords int + errorCode int32 }{ { name: "basic", @@ -1323,6 +1283,7 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { req := kmsg.NewProduceRequest() req.SetVersion(int16(apiVersion)) + req.Acks = 1 // Leader Ack transactionID := "transaction-id" req.TransactionID = &transactionID req.TimeoutMillis = 99999999 @@ -1330,8 +1291,70 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { return req }, + buildResponse: func(topic string) kmsg.ProduceResponse { + partition := kmsg.NewProduceResponseTopicPartition() + + var partitions []kmsg.ProduceResponseTopicPartition + partitions = append(partitions, partition) + + topics := kmsg.NewProduceResponseTopic() + topics.Topic = topic + topics.Partitions = append(topics.Partitions, partitions...) + + resp := kmsg.NewProduceResponse() + resp.SetVersion(int16(apiVersion)) + resp.ThrottleMillis = 999999999 + resp.Topics = append(resp.Topics, topics) + return resp + }, numProducedRecords: 2, }, + { + name: "with error code", + topic: "test-topic-error", + buildRequest: func(topic string) kmsg.ProduceRequest { + // Make record batch over 16KiB for larger varint size + record := makeRecordWithVal(make([]byte, 10000)) + records := []kmsg.Record{record, record} + recordBatch := makeRecordBatch(records...) 
+ + partition := kmsg.NewProduceRequestTopicPartition() + partition.Records = recordBatch.AppendTo(partition.Records) + + reqTopic := kmsg.NewProduceRequestTopic() + reqTopic.Partitions = append(reqTopic.Partitions, partition) + reqTopic.Topic = topic + + req := kmsg.NewProduceRequest() + req.SetVersion(int16(apiVersion)) + req.Acks = -1 // All ISR Acks + transactionID := "transaction-id" + req.TransactionID = &transactionID + req.TimeoutMillis = 99999999 + req.Topics = append(req.Topics, reqTopic) + + return req + }, + buildResponse: func(topic string) kmsg.ProduceResponse { + partition := kmsg.NewProduceResponseTopicPartition() + partition.ErrorCode = 1 + + var partitions []kmsg.ProduceResponseTopicPartition + partitions = append(partitions, partition) + + topics := kmsg.NewProduceResponseTopic() + topics.Topic = topic + topics.Partitions = append(topics.Partitions, partitions...) + + resp := kmsg.NewProduceResponse() + resp.SetVersion(int16(apiVersion)) + resp.ThrottleMillis = 999999999 + resp.Topics = append(resp.Topics, topics) + return resp + }, + numProducedRecords: 2, + errorCode: 1, + }, } can := newCannedClientServer(t, tls) @@ -1349,9 +1372,9 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager) }) req := tt.buildRequest(tt.topic) - formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) - data := formatter.AppendRequest(make([]byte, 0), &req, int32(99)) - msgs := []Message{{request: data}} + var msgs []Message + resp := tt.buildResponse(tt.topic) + msgs = appendMessages(msgs, 99, &req, &resp) can.runClient(msgs) @@ -1359,9 +1382,79 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { expectedNumberOfProduceRequests: tt.numProducedRecords, expectedAPIVersionProduce: apiVersion, tlsEnabled: tls, - }, kafkaSuccessErrorCode) + }, tt.errorCode) }) + + req := tt.buildRequest(tt.topic) + resp := tt.buildResponse(tt.topic) + formatter := kmsg.NewRequestFormatter(kmsg.FormatterClientID("kgo")) + + groups := getSplitGroups(&req, &resp, formatter) + + for groupIdx, group := range groups { + name := fmt.Sprintf("split/%s/group%d", tt.name, groupIdx) + t.Run(name, func(t *testing.T) { + t.Cleanup(func() { + cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager) + }) + + can.runClient(group.msgs) + + getAndValidateKafkaStats(t, monitor, 1, tt.topic, kafkaParsingValidation{ + expectedNumberOfProduceRequests: tt.numProducedRecords * group.numSets, + expectedAPIVersionProduce: apiVersion, + tlsEnabled: tls, + }, tt.errorCode) + }) + } + } +} + +func getSplitGroups(req kmsg.Request, resp kmsg.Response, formatter *kmsg.RequestFormatter) []groupInfo { + var groups []groupInfo + var info groupInfo + + for splitIdx := 0; splitIdx < 500; splitIdx++ { + reqData := formatter.AppendRequest(make([]byte, 0), req, int32(splitIdx)) + respData := appendResponse(make([]byte, 0), resp, uint32(splitIdx)) + + // There is an assumption in the code that there are no splits + // inside the header. 
+ minSegSize := 8 + + segSize := min(minSegSize+splitIdx, len(respData)) + if segSize >= len(respData) { + break + } + + var msgs []Message + msgs = append(msgs, Message{request: reqData}) + msgs = append(msgs, Message{response: respData[0:segSize]}) + + if segSize+8 >= len(respData) { + msgs = append(msgs, Message{response: respData[segSize:]}) + } else { + // Three segments tests other code paths than two, for example + // it will fail if the tcp_seq is not updated in the response + // parsing continuation path. + msgs = append(msgs, Message{response: respData[segSize : segSize+8]}) + msgs = append(msgs, Message{response: respData[segSize+8:]}) + } + + if info.numSets >= 20 { + groups = append(groups, info) + info.numSets = 0 + info.msgs = make([]Message, 0) + } + + info.numSets++ + info.msgs = append(info.msgs, msgs...) + } + + if info.numSets > 0 { + groups = append(groups, info) } + return groups } func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() { @@ -1526,13 +1619,13 @@ func validateProduceFetchCount(t *assert.CollectT, kafkaStats map[kafka.Key]*kaf continue } assert.Equal(t, topicName[:min(len(topicName), 80)], kafkaKey.TopicName) + assert.Greater(t, requestStats.FirstLatencySample, float64(1)) switch kafkaKey.RequestAPIKey { case kafka.ProduceAPIKey: assert.Equal(t, uint16(validation.expectedAPIVersionProduce), kafkaKey.RequestVersion) numberOfProduceRequests += requestStats.Count case kafka.FetchAPIKey: assert.Equal(t, uint16(validation.expectedAPIVersionFetch), kafkaKey.RequestVersion) - assert.Greater(t, requestStats.FirstLatencySample, float64(1)) numberOfFetchRequests += requestStats.Count default: assert.FailNow(t, "Expecting only produce or fetch kafka requests") From b0ff386b7ad850df3214d420138fb68b8019ec87 Mon Sep 17 00:00:00 2001 From: Olivier G <52180542+ogaca-dd@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:23:24 +0200 Subject: [PATCH 007/128] Add params to jmxlogger.Module (#28999) --- cmd/agent/subcommands/jmx/command.go | 3 +-- cmd/agent/subcommands/run/command.go | 3 +-- comp/agent/bundle.go | 4 ++-- comp/agent/bundle_test.go | 3 +-- comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go | 3 ++- pkg/cli/subcommands/check/command.go | 3 +-- 6 files changed, 8 insertions(+), 11 deletions(-) diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go index 6ae802311346f..b827558dbdb73 100644 --- a/cmd/agent/subcommands/jmx/command.go +++ b/cmd/agent/subcommands/jmx/command.go @@ -158,8 +158,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(tagger.NewTaggerParamsForCoreAgent), taggerimpl.Module(), autodiscoveryimpl.Module(), - agent.Bundle(), - fx.Supply(jmxloggerimpl.NewCliParams(cliParams.logFile)), + agent.Bundle(jmxloggerimpl.NewCliParams(cliParams.logFile)), ) } diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index b26ba80c58a9a..6ff4f17804df5 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -443,8 +443,7 @@ func getSharedFxOption() fx.Option { collectorimpl.Module(), process.Bundle(), guiimpl.Module(), - agent.Bundle(), - fx.Supply(jmxloggerimpl.NewDefaultParams()), + agent.Bundle(jmxloggerimpl.NewDefaultParams()), fx.Provide(func(config config.Component) healthprobe.Options { return healthprobe.Options{ Port: config.GetInt("health_port"), diff --git a/comp/agent/bundle.go b/comp/agent/bundle.go index bfb93359e0554..5e550c0e1ee08 100644 --- a/comp/agent/bundle.go +++ b/comp/agent/bundle.go @@ -17,10 +17,10 
@@ import ( // team: agent-shared-components // Bundle defines the fx options for this bundle. -func Bundle() fxutil.BundleOptions { +func Bundle(params jmxloggerimpl.Params) fxutil.BundleOptions { return fxutil.Bundle( autoexitimpl.Module(), - jmxloggerimpl.Module(), + jmxloggerimpl.Module(params), expvarserverimpl.Module(), cloudfoundrycontainerimpl.Module(), ) diff --git a/comp/agent/bundle_test.go b/comp/agent/bundle_test.go index 2ce804e6894d9..7f60820804110 100644 --- a/comp/agent/bundle_test.go +++ b/comp/agent/bundle_test.go @@ -24,7 +24,7 @@ import ( func TestBundleDependencies(t *testing.T) { fxutil.TestBundle(t, - Bundle(), + Bundle(jmxloggerimpl.NewDefaultParams()), core.MockBundle(), compressionimpl.MockModule(), defaultforwarder.MockModule(), @@ -32,7 +32,6 @@ func TestBundleDependencies(t *testing.T) { eventplatformimpl.MockModule(), demultiplexerimpl.Module(), fx.Supply(demultiplexerimpl.NewDefaultParams()), - fx.Supply(jmxloggerimpl.NewDefaultParams()), workloadmetafxmock.MockModule(workloadmeta.NewParams()), ) } diff --git a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go index 965fae44628fd..1fe9aa55b1dfa 100644 --- a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go +++ b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go @@ -20,9 +20,10 @@ import ( ) // Module defines the fx options for this component. -func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( fx.Provide(newJMXLogger), + fx.Supply(params), ) } diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index ea8c9642e824d..f971da2048b6c 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -217,8 +217,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() pidmap.Component { return nil }), getPlatformModules(), - jmxloggerimpl.Module(), - fx.Supply(jmxloggerimpl.NewDisabledParams()), + jmxloggerimpl.Module(jmxloggerimpl.NewDisabledParams()), ) }, } From edd107b506a26c0b6324c067886367d79e0b89dd Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Wed, 4 Sep 2024 18:30:56 +0200 Subject: [PATCH 008/128] discovery: Rework service name handling (#29000) --- .../corechecks/servicediscovery/events.go | 68 ++-- .../servicediscovery/events_test.go | 185 +++++----- .../corechecks/servicediscovery/impl_linux.go | 1 - .../servicediscovery/impl_linux_test.go | 319 ++++++++++-------- .../servicediscovery/model/model.go | 4 +- .../servicediscovery/module/impl_linux.go | 24 +- .../module/impl_linux_test.go | 22 +- .../servicediscovery/service_detector.go | 6 +- .../corechecks/servicediscovery/usm/java.go | 28 +- .../servicediscovery/usm/service.go | 61 +++- .../servicediscovery/usm/service_test.go | 194 ++++++----- .../aggregator/servicediscoveryAggregator.go | 24 +- test/new-e2e/tests/discovery/linux_test.go | 22 +- .../discovery/testdata/provision/provision.sh | 10 +- 14 files changed, 550 insertions(+), 418 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index fe22df5e15326..ebf17b1b8a994 100644 --- a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -26,20 +26,22 @@ const ( ) type eventPayload struct { - NamingSchemaVersion string `json:"naming_schema_version"` - ServiceName string `json:"service_name"` - HostName string `json:"host_name"` - Env string `json:"env"` - 
ServiceLanguage string `json:"service_language"` - ServiceType string `json:"service_type"` - StartTime int64 `json:"start_time"` - LastSeen int64 `json:"last_seen"` - APMInstrumentation string `json:"apm_instrumentation"` - ServiceNameSource string `json:"service_name_source"` - Ports []uint16 `json:"ports"` - PID int `json:"pid"` - CommandLine []string `json:"command_line"` - RSSMemory uint64 `json:"rss_memory"` + NamingSchemaVersion string `json:"naming_schema_version"` + ServiceName string `json:"service_name"` + GeneratedServiceName string `json:"generated_service_name"` + DDService string `json:"dd_service,omitempty"` + HostName string `json:"host_name"` + Env string `json:"env"` + ServiceLanguage string `json:"service_language"` + ServiceType string `json:"service_type"` + StartTime int64 `json:"start_time"` + LastSeen int64 `json:"last_seen"` + APMInstrumentation string `json:"apm_instrumentation"` + ServiceNameSource string `json:"service_name_source,omitempty"` + Ports []uint16 `json:"ports"` + PID int `json:"pid"` + CommandLine []string `json:"command_line"` + RSSMemory uint64 `json:"rss_memory"` } type event struct { @@ -57,24 +59,34 @@ func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { host := ts.hostname.GetSafe(context.Background()) env := pkgconfig.Datadog().GetString("env") + nameSource := "" + if svc.service.DDService != "" { + nameSource = "provided" + if svc.service.DDServiceInjected { + nameSource = "injected" + } + } + return &event{ RequestType: t, APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: svc.meta.Name, - HostName: host, - Env: env, - ServiceLanguage: svc.meta.Language, - ServiceType: svc.meta.Type, - StartTime: int64(svc.service.StartTimeSecs), - LastSeen: svc.LastHeartbeat.Unix(), - APMInstrumentation: svc.meta.APMInstrumentation, - ServiceNameSource: svc.meta.NameSource, - Ports: svc.service.Ports, - PID: svc.service.PID, - CommandLine: svc.service.CommandLine, - RSSMemory: svc.service.RSS, + NamingSchemaVersion: "1", + ServiceName: svc.meta.Name, + GeneratedServiceName: svc.service.GeneratedName, + DDService: svc.service.DDService, + HostName: host, + Env: env, + ServiceLanguage: svc.meta.Language, + ServiceType: svc.meta.Type, + StartTime: int64(svc.service.StartTimeSecs), + LastSeen: svc.LastHeartbeat.Unix(), + APMInstrumentation: svc.meta.APMInstrumentation, + ServiceNameSource: nameSource, + Ports: svc.service.Ports, + PID: svc.service.PID, + CommandLine: svc.service.CommandLine, + RSSMemory: svc.service.RSS, }, } } diff --git a/pkg/collector/corechecks/servicediscovery/events_test.go b/pkg/collector/corechecks/servicediscovery/events_test.go index 747d49e5dcbf2..93ffb9ffe1ecb 100644 --- a/pkg/collector/corechecks/servicediscovery/events_test.go +++ b/pkg/collector/corechecks/servicediscovery/events_test.go @@ -56,18 +56,20 @@ func Test_telemetrySender(t *testing.T) { svc := serviceInfo{ service: model.Service{ - PID: 99, - CommandLine: []string{"test-service", "--args"}, - Ports: []uint16{80, 8080}, - StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()), - RSS: 500 * 1024 * 1024, + PID: 99, + CommandLine: []string{"test-service", "--args"}, + Ports: []uint16{80, 8080}, + StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()), + RSS: 500 * 1024 * 1024, + GeneratedName: "generated-name", + DDService: "dd-service", + DDServiceInjected: true, }, meta: ServiceMetadata{ Name: "test-service", Language: "jvm", Type: "web_service", APMInstrumentation: "injected", - NameSource: "generated", }, 
LastHeartbeat: now, } @@ -81,60 +83,66 @@ func Test_telemetrySender(t *testing.T) { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "generated", - Ports: []uint16{80, 8080}, - PID: 99, - CommandLine: []string{"test-service", "--args"}, - RSSMemory: 500 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name", + DDService: "dd-service", + ServiceNameSource: "injected", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + Ports: []uint16{80, 8080}, + PID: 99, + CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "generated", - Ports: []uint16{80, 8080}, - PID: 99, - CommandLine: []string{"test-service", "--args"}, - RSSMemory: 500 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name", + DDService: "dd-service", + ServiceNameSource: "injected", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + Ports: []uint16{80, 8080}, + PID: 99, + CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, { RequestType: "end-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "generated", - Ports: []uint16{80, 8080}, - PID: 99, - CommandLine: []string{"test-service", "--args"}, - RSSMemory: 500 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name", + DDService: "dd-service", + ServiceNameSource: "injected", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + Ports: []uint16{80, 8080}, + PID: 99, + CommandLine: []string{"test-service", "--args"}, + RSSMemory: 500 * 1024 * 1024, }, }, } @@ -168,13 +176,14 @@ func Test_telemetrySender_name_provided(t *testing.T) { PID: 55, CommandLine: []string{"foo", "--option"}, StartTimeSecs: uint64(now.Add(-20 * time.Minute).Unix()), + GeneratedName: "generated-name2", + DDService: "dd-service-provided", }, meta: ServiceMetadata{ Name: "test-service", Language: "jvm", Type: "web_service", APMInstrumentation: "injected", - NameSource: "provided", }, LastHeartbeat: now, } @@ -188,54 +197,60 @@ func Test_telemetrySender_name_provided(t *testing.T) { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - 
ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "provided", - PID: 55, - CommandLine: []string{"foo", "--option"}, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name2", + DDService: "dd-service-provided", + ServiceNameSource: "provided", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + PID: 55, + CommandLine: []string{"foo", "--option"}, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "provided", - PID: 55, - CommandLine: []string{"foo", "--option"}, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name2", + DDService: "dd-service-provided", + ServiceNameSource: "provided", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + PID: 55, + CommandLine: []string{"foo", "--option"}, }, }, { RequestType: "end-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service", - HostName: "test-host", - Env: "", - ServiceLanguage: "jvm", - ServiceType: "web_service", - StartTime: 1715557200, - LastSeen: 1715558400, - APMInstrumentation: "injected", - ServiceNameSource: "provided", - PID: 55, - CommandLine: []string{"foo", "--option"}, + NamingSchemaVersion: "1", + ServiceName: "test-service", + GeneratedServiceName: "generated-name2", + DDService: "dd-service-provided", + ServiceNameSource: "provided", + HostName: "test-host", + Env: "", + ServiceLanguage: "jvm", + ServiceType: "web_service", + StartTime: 1715557200, + LastSeen: 1715558400, + APMInstrumentation: "injected", + PID: 55, + CommandLine: []string{"foo", "--option"}, }, }, } diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index db48fbfe6de20..c24bd5087cd09 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -144,7 +144,6 @@ func (li *linuxImpl) getServiceInfo(service model.Service) serviceInfo { Language: service.Language, Type: string(serviceType), APMInstrumentation: service.APMInstrumentation, - NameSource: service.NameSource, } return serviceInfo{ diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go index 563dabbbd8dd4..a42d2dbac7414 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go @@ -70,19 +70,22 @@ var ( portTCP8080 = model.Service{ PID: procTestService1.pid, Name: "test-service-1", + GeneratedName: "test-service-1-generated", + DDService: "test-service-1", + DDServiceInjected: true, Ports: []uint16{8080}, APMInstrumentation: string(apm.None), - NameSource: "provided", RSS: 100 * 1024 * 1024, CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, } portTCP8080UpdatedRSS = model.Service{ PID: 
procTestService1.pid, - Name: "test-service-1", + GeneratedName: "test-service-1-generated", + DDService: "test-service-1", + DDServiceInjected: true, Ports: []uint16{8080}, APMInstrumentation: string(apm.None), - NameSource: "provided", RSS: 200 * 1024 * 1024, CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, @@ -90,21 +93,25 @@ var ( portTCP8080DifferentPID = model.Service{ PID: procTestService1DifferentPID.pid, Name: "test-service-1", + GeneratedName: "test-service-1-generated", + DDService: "test-service-1", + DDServiceInjected: true, Ports: []uint16{8080}, APMInstrumentation: string(apm.Injected), - NameSource: "generated", CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, } portTCP8081 = model.Service{ PID: procIgnoreService1.pid, Name: "ignore-1", + GeneratedName: "ignore-1", Ports: []uint16{8081}, StartTimeSecs: procLaunchedSeconds, } portTCP5000 = model.Service{ PID: procPythonService.pid, Name: "python-service", + GeneratedName: "python-service", Language: "python", Ports: []uint16{5000}, CommandLine: pythonCommandLine, @@ -113,6 +120,7 @@ var ( portTCP5432 = model.Service{ PID: procTestService1Repeat.pid, Name: "test-service-1", + GeneratedName: "test-service-1", Ports: []uint16{5432}, CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, @@ -206,91 +214,99 @@ func Test_linuxImpl(t *testing.T) { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 100 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(1 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 100 * 1024 * 1024, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 200 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 200 * 1024 * 1024, }, }, { RequestType: "end-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: 
[]string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 200 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 200 * 1024 * 1024, }, }, { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "python-service", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{5000}, - PID: 500, - ServiceLanguage: "python", - CommandLine: pythonCommandLine, + NamingSchemaVersion: "1", + ServiceName: "python-service", + GeneratedServiceName: "python-service", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(1 * time.Minute).Unix(), + Ports: []uint16{5000}, + PID: 500, + ServiceLanguage: "python", + CommandLine: pythonCommandLine, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "python-service", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{5000}, - PID: 500, - ServiceLanguage: "python", - CommandLine: pythonCommandLine, + NamingSchemaVersion: "1", + ServiceName: "python-service", + GeneratedServiceName: "python-service", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{5000}, + PID: 500, + ServiceLanguage: "python", + CommandLine: pythonCommandLine, }, }, }, @@ -334,86 +350,93 @@ func Test_linuxImpl(t *testing.T) { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "db", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{5432}, - PID: 101, - CommandLine: []string{"test-service-1"}, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1", + ServiceType: "db", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(1 * time.Minute).Unix(), + Ports: []uint16{5432}, + PID: 101, + CommandLine: []string{"test-service-1"}, }, }, { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 100 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(1 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + 
CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 100 * 1024 * 1024, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "db", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{5432}, - PID: 101, - CommandLine: []string{"test-service-1"}, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1", + ServiceType: "db", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{5432}, + PID: 101, + CommandLine: []string{"test-service-1"}, }, }, { RequestType: "end-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "db", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{5432}, - PID: 101, - CommandLine: []string{"test-service-1"}, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1", + ServiceType: "db", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{5432}, + PID: 101, + CommandLine: []string{"test-service-1"}, }, }, { RequestType: "heartbeat-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(20 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 100 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(20 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 100 * 1024 * 1024, }, }, }, @@ -455,37 +478,41 @@ func Test_linuxImpl(t *testing.T) { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - ServiceNameSource: "provided", - RSSMemory: 100 * 1024 * 1024, + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(1 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 99, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "none", + RSSMemory: 100 * 1024 * 1024, }, }, { RequestType: "start-service", APIVersion: "v2", Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - 
LastSeen: calcTime(22 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 102, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "injected", - ServiceNameSource: "generated", + NamingSchemaVersion: "1", + ServiceName: "test-service-1", + GeneratedServiceName: "test-service-1-generated", + DDService: "test-service-1", + ServiceNameSource: "injected", + ServiceType: "web_service", + HostName: host, + Env: "", + StartTime: calcTime(0).Unix(), + LastSeen: calcTime(22 * time.Minute).Unix(), + Ports: []uint16{8080}, + PID: 102, + CommandLine: []string{"test-service-1"}, + APMInstrumentation: "injected", }, }, }, diff --git a/pkg/collector/corechecks/servicediscovery/model/model.go b/pkg/collector/corechecks/servicediscovery/model/model.go index 2c563860fcfed..79fd656bbfb58 100644 --- a/pkg/collector/corechecks/servicediscovery/model/model.go +++ b/pkg/collector/corechecks/servicediscovery/model/model.go @@ -10,7 +10,9 @@ package model type Service struct { PID int `json:"pid"` Name string `json:"name"` - NameSource string `json:"name_source"` + GeneratedName string `json:"generated_name"` + DDService string `json:"dd_service"` + DDServiceInjected bool `json:"dd_service_injected"` Ports []uint16 `json:"ports"` APMInstrumentation string `json:"apm_instrumentation"` Language string `json:"language"` diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 690b74e5e6e7d..4331835f96d91 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -43,8 +43,9 @@ var _ module.Module = &discovery{} // serviceInfo holds process data that should be cached between calls to the // endpoint. 
type serviceInfo struct { - name string - nameFromDDService bool + generatedName string + ddServiceName string + ddServiceInjected bool language language.Language apmInstrumentation apm.Instrumentation cmdLine []string @@ -317,7 +318,7 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) contextMap := make(usm.DetectorContextMap) root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root") - name, fromDDService := servicediscovery.GetServiceName(cmdline, envs, root, contextMap) + nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, contextMap) lang := language.FindInArgs(exe, cmdline) if lang == "" { lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid) @@ -325,10 +326,11 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) apmInstrumentation := apm.Detect(int(proc.Pid), cmdline, envs, lang, contextMap) return &serviceInfo{ - name: name, + generatedName: nameMeta.Name, + ddServiceName: nameMeta.DDService, language: lang, apmInstrumentation: apmInstrumentation, - nameFromDDService: fromDDService, + ddServiceInjected: nameMeta.DDServiceInjected, cmdLine: sanitizeCmdLine(s.scrubber, cmdline), startTimeSecs: uint64(createTime / 1000), }, nil @@ -447,15 +449,17 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service s.mux.Unlock() } - nameSource := "generated" - if info.nameFromDDService { - nameSource = "provided" + name := info.ddServiceName + if name == "" { + name = info.generatedName } return &model.Service{ PID: int(pid), - Name: info.name, - NameSource: nameSource, + Name: name, + GeneratedName: info.generatedName, + DDService: info.ddServiceName, + DDServiceInjected: info.ddServiceInjected, Ports: ports, APMInstrumentation: string(info.apmInstrumentation), Language: string(info.language), diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 8028da0225c70..f9f0a58184710 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -306,8 +306,10 @@ func TestServiceName(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { portMap := getServicesMap(t, url) assert.Contains(collect, portMap, pid) - assert.Equal(t, "foobar", portMap[pid].Name) - assert.Equal(t, "provided", portMap[pid].NameSource) + assert.Equal(t, "foobar", portMap[pid].DDService) + assert.Equal(t, portMap[pid].DDService, portMap[pid].Name) + assert.Equal(t, "sleep", portMap[pid].GeneratedName) + assert.False(t, portMap[pid].DDServiceInjected) }, 30*time.Second, 100*time.Millisecond) } @@ -328,8 +330,13 @@ func TestInjectedServiceName(t *testing.T) { pid := os.Getpid() portMap := getServicesMap(t, url) require.Contains(t, portMap, pid) - require.Equal(t, "injected-service-name", portMap[pid].Name) - require.Equal(t, "generated", portMap[pid].NameSource) + require.Equal(t, "injected-service-name", portMap[pid].DDService) + require.Equal(t, portMap[pid].DDService, portMap[pid].Name) + // The GeneratedName can vary depending on how the tests are run, so don't + // assert for a specific value. 
+ require.NotEmpty(t, portMap[pid].GeneratedName) + require.NotEqual(t, portMap[pid].DDService, portMap[pid].GeneratedName) + assert.True(t, portMap[pid].DDServiceInjected) } func TestAPMInstrumentationInjected(t *testing.T) { @@ -579,7 +586,8 @@ func TestNodeDocker(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { svcMap := getServicesMap(t, url) assert.Contains(collect, svcMap, pid) - assert.Equal(collect, "nodejs-https-server", svcMap[pid].Name) + assert.Equal(collect, "nodejs-https-server", svcMap[pid].GeneratedName) + assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name) assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) assertStat(collect, svcMap[pid]) }, 30*time.Second, 100*time.Millisecond) @@ -766,8 +774,8 @@ func TestCache(t *testing.T) { for i, cmd := range cmds { pid := int32(cmd.Process.Pid) - require.Contains(t, discovery.cache[pid].name, serviceNames[i]) - require.True(t, discovery.cache[pid].nameFromDDService) + require.Equal(t, serviceNames[i], discovery.cache[pid].ddServiceName) + require.False(t, discovery.cache[pid].ddServiceInjected) } cancel() diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go index 529c8c92a64e7..ac0434be492e8 100644 --- a/pkg/collector/corechecks/servicediscovery/service_detector.go +++ b/pkg/collector/corechecks/servicediscovery/service_detector.go @@ -18,7 +18,6 @@ type ServiceMetadata struct { Language string Type string APMInstrumentation string - NameSource string } func fixAdditionalNames(additionalNames []string) []string { @@ -43,8 +42,9 @@ func makeFinalName(meta usm.ServiceMetadata) string { // GetServiceName gets the service name based on the command line arguments and // the list of environment variables. 
-func GetServiceName(cmdline []string, env map[string]string, root string, contextMap usm.DetectorContextMap) (string, bool) { +func GetServiceName(cmdline []string, env map[string]string, root string, contextMap usm.DetectorContextMap) usm.ServiceMetadata { fs := usm.NewSubDirFS(root) meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, contextMap) - return makeFinalName(meta), meta.FromDDService + meta.Name = makeFinalName(meta) + return meta } diff --git a/pkg/collector/corechecks/servicediscovery/usm/java.go b/pkg/collector/corechecks/servicediscovery/usm/java.go index 7c017301620e2..6512c30c04029 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/java.go +++ b/pkg/collector/corechecks/servicediscovery/usm/java.go @@ -18,12 +18,10 @@ func newJavaDetector(ctx DetectionContext) detector { return &javaDetector{ctx: ctx} } -func (jd javaDetector) detect(args []string) (ServiceMetadata, bool) { +func (jd javaDetector) detect(args []string) (metadata ServiceMetadata, success bool) { // Look for dd.service if index := slices.IndexFunc(args, func(arg string) bool { return strings.HasPrefix(arg, "-Ddd.service=") }); index != -1 { - metadata := NewServiceMetadata(strings.TrimPrefix(args[index], "-Ddd.service=")) - metadata.FromDDService = true - return metadata, true + metadata.DDService = strings.TrimPrefix(args[index], "-Ddd.service=") } prevArgIsFlag := false var additionalNames []string @@ -45,30 +43,40 @@ func (jd javaDetector) detect(args []string) (ServiceMetadata, bool) { // try to see if the application is a spring boot archive and extract its application name if len(additionalNames) == 0 { if springAppName, ok := newSpringBootParser(jd.ctx).GetSpringBootAppName(a); ok { - return NewServiceMetadata(springAppName), true + success = true + metadata.Name = springAppName + return } } - return NewServiceMetadata(arg[:len(arg)-len(javaJarExtension)], additionalNames...), true + success = true + metadata.SetNames(arg[:len(arg)-len(javaJarExtension)], additionalNames...) + return } if strings.HasPrefix(arg, javaApachePrefix) { // take the project name after the package 'org.apache.' while stripping off the remaining package // and class name arg = arg[len(javaApachePrefix):] if idx := strings.Index(arg, "."); idx != -1 { - return NewServiceMetadata(arg[:idx], additionalNames...), true + success = true + metadata.SetNames(arg[:idx], additionalNames...) + return } } if idx := strings.LastIndex(arg, "."); idx != -1 && idx+1 < len(arg) { // take just the class name without the package - return NewServiceMetadata(arg[idx+1:], additionalNames...), true + success = true + metadata.SetNames(arg[idx+1:], additionalNames...) + return } - return NewServiceMetadata(arg, additionalNames...), true + success = true + metadata.SetNames(arg, additionalNames...) + return } } prevArgIsFlag = hasFlagPrefix && !includesAssignment && a != javaJarFlag } - return ServiceMetadata{}, false + return } diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go index f8bed0ad59541..9de72147d64df 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service.go @@ -44,9 +44,10 @@ const ( // ServiceMetadata holds information about a service. 
type ServiceMetadata struct { - Name string - AdditionalNames []string - FromDDService bool + Name string + AdditionalNames []string + DDService string + DDServiceInjected bool // for future usage: we can detect also the type, vendor, frameworks, etc } @@ -59,6 +60,21 @@ func NewServiceMetadata(name string, additional ...string) ServiceMetadata { return ServiceMetadata{Name: name, AdditionalNames: additional} } +// SetAdditionalNames set additional names for the service +func (s *ServiceMetadata) SetAdditionalNames(additional ...string) { + if len(additional) > 1 { + // names are discovered in unpredictable order. We need to keep them sorted if we're going to join them + slices.Sort(additional) + } + s.AdditionalNames = additional +} + +// SetNames sets generated names for the service. +func (s *ServiceMetadata) SetNames(name string, additional ...string) { + s.Name = name + s.SetAdditionalNames(additional...) +} + // GetServiceKey returns the key for the service. func (s ServiceMetadata) GetServiceKey() string { if len(s.AdditionalNames) > 0 { @@ -165,22 +181,20 @@ var binsWithContext = map[string]detectorCreatorFn{ "gunicorn": newGunicornDetector, } -func checkForInjectionNaming(envs map[string]string) bool { - fromDDService := true +func serviceNameInjected(envs map[string]string) bool { if env, ok := envs["DD_INJECTION_ENABLED"]; ok { values := strings.Split(env, ",") for _, v := range values { if v == "service_name" { - fromDDService = false - break + return true } } } - return fromDDService + return false } // ExtractServiceMetadata attempts to detect ServiceMetadata from the given process. -func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, contextMap DetectorContextMap) (ServiceMetadata, bool) { +func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { dc := DetectionContext{ args: args, envs: envs, @@ -189,14 +203,15 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, } cmd := dc.args if len(cmd) == 0 || len(cmd[0]) == 0 { - return ServiceMetadata{}, false + return } + // We always return a service name from here on + success = true + if value, ok := chooseServiceNameFromEnvs(dc.envs); ok { - metadata := NewServiceMetadata(value) - // we only want to set FromDDService to true if the name wasn't assigned by injection - metadata.FromDDService = checkForInjectionNaming(dc.envs) - return metadata, true + metadata.DDService = value + metadata.DDServiceInjected = serviceNameInjected(envs) } exe := cmd[0] @@ -220,8 +235,19 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, exe = normalizeExeName(exe) if detectorProvider, ok := binsWithContext[exe]; ok { - if metadata, ok := detectorProvider(dc).detect(cmd[1:]); ok { - return metadata, true + langMeta, ok := detectorProvider(dc).detect(cmd[1:]) + + // The detector could return a DD Service name (eg. Java, from the + // dd.service property), but still fail to generate a service name (ok = + // false) so check this first. + if langMeta.DDService != "" { + metadata.DDService = langMeta.DDService + } + + if ok { + metadata.Name = langMeta.Name + metadata.SetAdditionalNames(langMeta.AdditionalNames...) 
+ return } } @@ -230,7 +256,8 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, exe = exe[:i] } - return NewServiceMetadata(exe), true + metadata.Name = exe + return } func removeFilePath(s string) string { diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go index a0403966e141b..38d8c390bb169 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go @@ -50,121 +50,122 @@ func TestExtractServiceMetadata(t *testing.T) { name string cmdline []string envs map[string]string - expectedServiceTag string + expectedGeneratedName string + expectedDDService string expectedAdditionalServices []string - fromDDService bool + ddServiceInjected bool fs *SubDirFS skipOnWindows bool }{ { - name: "empty", - cmdline: []string{}, - expectedServiceTag: "", + name: "empty", + cmdline: []string{}, + expectedGeneratedName: "", }, { - name: "blank", - cmdline: []string{""}, - expectedServiceTag: "", + name: "blank", + cmdline: []string{""}, + expectedGeneratedName: "", }, { name: "single arg executable", cmdline: []string{ "./my-server.sh", }, - expectedServiceTag: "my-server", + expectedGeneratedName: "my-server", }, { name: "single arg executable with DD_SERVICE", cmdline: []string{ "./my-server.sh", }, - envs: map[string]string{"DD_SERVICE": "my-service"}, - expectedServiceTag: "my-service", - fromDDService: true, + envs: map[string]string{"DD_SERVICE": "my-service"}, + expectedDDService: "my-service", + expectedGeneratedName: "my-server", }, { name: "single arg executable with DD_TAGS", cmdline: []string{ "./my-server.sh", }, - envs: map[string]string{"DD_TAGS": "service:my-service"}, - expectedServiceTag: "my-service", - fromDDService: true, + envs: map[string]string{"DD_TAGS": "service:my-service"}, + expectedDDService: "my-service", + expectedGeneratedName: "my-server", }, { name: "single arg executable with special chars", cmdline: []string{ "./-my-server.sh-", }, - expectedServiceTag: "my-server", + expectedGeneratedName: "my-server", }, { name: "sudo", cmdline: []string{ "sudo", "-E", "-u", "dog", "/usr/local/bin/myApp", "-items=0,1,2,3", "-foo=bar", }, - expectedServiceTag: "myApp", + expectedGeneratedName: "myApp", }, { name: "python flask argument", cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "flask", "run", "--host=0.0.0.0", }, - expectedServiceTag: "flask", - envs: map[string]string{"PWD": "testdata/python"}, - fs: &subUsmTestData, + expectedGeneratedName: "flask", + envs: map[string]string{"PWD": "testdata/python"}, + fs: &subUsmTestData, }, { name: "python - flask argument in path", cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "testdata/python/flask", "run", "--host=0.0.0.0", "--without-threads", }, - expectedServiceTag: "flask", - fs: &subUsmTestData, + expectedGeneratedName: "flask", + fs: &subUsmTestData, }, { name: "python flask in single argument", cmdline: []string{ "/opt/python/2.7.11/bin/python2.7 flask run --host=0.0.0.0", }, - envs: map[string]string{"PWD": "testdata/python"}, - expectedServiceTag: "flask", - fs: &subUsmTestData, + envs: map[string]string{"PWD": "testdata/python"}, + expectedGeneratedName: "flask", + fs: &subUsmTestData, }, { name: "python - module hello", cmdline: []string{ "python3", "-m", "hello", }, - expectedServiceTag: "hello", + expectedGeneratedName: "hello", }, { name: "ruby - td-agent", cmdline: []string{ "ruby", "/usr/sbin/td-agent", 
"--log", "/var/log/td-agent/td-agent.log", "--daemon", "/var/run/td-agent/td-agent.pid", }, - expectedServiceTag: "td-agent", + expectedGeneratedName: "td-agent", }, { name: "java using the -jar flag to define the service", cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "-jar", "/opt/sheepdog/bin/myservice.jar", }, - expectedServiceTag: "myservice", + expectedGeneratedName: "myservice", }, { name: "java class name as service", cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "com.datadog.example.HelloWorld", }, - expectedServiceTag: "HelloWorld", + expectedGeneratedName: "HelloWorld", }, { name: "java kafka", cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "kafka.Kafka", }, - expectedServiceTag: "Kafka", + expectedGeneratedName: "Kafka", }, { name: "java parsing for org.apache projects with cassandra as the service", @@ -174,14 +175,14 @@ func TestExtractServiceMetadata(t *testing.T) { "-cp", "/etc/cassandra:/usr/share/cassandra/lib/HdrHistogram-2.1.9.jar:/usr/share/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar", "org.apache.cassandra.service.CassandraDaemon", }, - expectedServiceTag: "cassandra", + expectedGeneratedName: "cassandra", }, { name: "java space in java executable path", cmdline: []string{ "/home/dd/my java dir/java", "com.dog.cat", }, - expectedServiceTag: "cat", + expectedGeneratedName: "cat", }, { name: "node js with package.json not present", cmdline: []string{ @@ -192,7 +193,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "/somewhere/index.js", }, - expectedServiceTag: "node", + expectedGeneratedName: "node", }, { name: "node js with a broken package.json", @@ -200,7 +201,7 @@ func TestExtractServiceMetadata(t *testing.T) { "/usr/bin/node", "./testdata/inner/index.js", }, - expectedServiceTag: "node", + expectedGeneratedName: "node", }, { name: "node js with a valid package.json", @@ -212,8 +213,8 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "./testdata/index.js", }, - expectedServiceTag: "my-awesome-package", - fs: &subUsmTestData, + expectedGeneratedName: "my-awesome-package", + fs: &subUsmTestData, }, { name: "node js with a symlink to a .js file and valid package.json", @@ -225,9 +226,9 @@ func TestExtractServiceMetadata(t *testing.T) { "./testdata/bins/broken", "./testdata/bins/json-server", }, - expectedServiceTag: "json-server-package", - skipOnWindows: true, - fs: &subUsmTestData, + expectedGeneratedName: "json-server-package", + skipOnWindows: true, + fs: &subUsmTestData, }, { name: "node js with a valid nested package.json and cwd", @@ -239,9 +240,9 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "index.js", }, - envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes - fs: &subUsmTestData, - expectedServiceTag: "my-awesome-package", + envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes + fs: &subUsmTestData, + expectedGeneratedName: "my-awesome-package", }, { name: "spring boot default options", @@ -250,7 +251,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-jar", springBootAppFullPath, }, - expectedServiceTag: "default-app", + expectedGeneratedName: "default-app", }, { name: "wildfly 18 standalone", @@ -276,7 +277,7 @@ func TestExtractServiceMetadata(t *testing.T) { "org.jboss.as.standalone", "-Djboss.home.dir=" + jbossTestAppRoot, "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone"}, - 
expectedServiceTag: "jboss-modules", + expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"my-jboss-webapp", "some_context_root", "web3"}, fs: &sub, envs: map[string]string{"PWD": "/sibiling"}, @@ -308,7 +309,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-mp", "" + jbossTestAppRoot + "/modules", "org.jboss.as.server"}, - expectedServiceTag: "jboss-modules", + expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"web3", "web4"}, fs: &sub, envs: map[string]string{"PWD": "/sibiling"}, @@ -330,7 +331,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-Dweblogic.home=/u01/oracle/wlserver/server", "weblogic.Server"}, envs: map[string]string{"PWD": weblogicTestAppRootAbsolute}, - expectedServiceTag: "Server", + expectedGeneratedName: "Server", expectedAdditionalServices: []string{"my_context", "sample4", "some_context_root"}, }, { @@ -338,8 +339,19 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/java", "-Ddd.service=custom", "-jar", "app.jar", }, - expectedServiceTag: "custom", - fromDDService: true, + expectedDDService: "custom", + expectedGeneratedName: "app", + }, + { + // The system property takes priority over the environment variable, see + // https://docs.datadoghq.com/tracing/trace_collection/library_config/java/ + name: "java with dd_service as system property and DD_SERVICE", + cmdline: []string{ + "/usr/bin/java", "-Ddd.service=dd-service-from-property", "-jar", "app.jar", + }, + envs: map[string]string{"DD_SERVICE": "dd-service-from-env"}, + expectedDDService: "dd-service-from-property", + expectedGeneratedName: "app", }, { name: "Tomcat 10.X", @@ -363,7 +375,7 @@ func TestExtractServiceMetadata(t *testing.T) { "org.apache.catalina.startup.Bootstrap", "start", }, - expectedServiceTag: "catalina", + expectedGeneratedName: "catalina", expectedAdditionalServices: []string{"app2", "custom"}, fs: &subUsmTestData, }, @@ -372,21 +384,21 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "./myservice.dll", }, - expectedServiceTag: "myservice", + expectedGeneratedName: "myservice", }, { name: "dotnet cmd with dll and options", cmdline: []string{ "/usr/bin/dotnet", "-v", "--", "/app/lib/myservice.dll", }, - expectedServiceTag: "myservice", + expectedGeneratedName: "myservice", }, { name: "dotnet cmd with unrecognized options", cmdline: []string{ "/usr/bin/dotnet", "run", "--project", "./projects/proj1/proj1.csproj", }, - expectedServiceTag: "dotnet", + expectedGeneratedName: "dotnet", }, { name: "PHP Laravel", @@ -395,7 +407,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "serve", }, - expectedServiceTag: "laravel", + expectedGeneratedName: "laravel", }, { name: "Plain PHP with INI", @@ -404,7 +416,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-ddatadog.service=foo", "swoole-server.php", }, - expectedServiceTag: "foo", + expectedGeneratedName: "foo", }, { name: "PHP with version number", @@ -413,7 +425,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, - expectedServiceTag: "laravel", + expectedGeneratedName: "laravel", }, { name: "PHP with two-digit version number", @@ -422,7 +434,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, - expectedServiceTag: "laravel", + expectedGeneratedName: "laravel", }, { name: "PHP-FPM shouldn't trigger php parsing", @@ -430,7 +442,7 @@ func TestExtractServiceMetadata(t *testing.T) { "php-fpm", "artisan", }, - expectedServiceTag: "php-fpm", + 
expectedGeneratedName: "php-fpm", }, { name: "PHP-FPM with version number shouldn't trigger php parsing", @@ -438,28 +450,29 @@ func TestExtractServiceMetadata(t *testing.T) { "php8.1-fpm", "artisan", }, - expectedServiceTag: "php8", + expectedGeneratedName: "php8", }, { - name: "DD_SERVICE_set_manually", - cmdline: []string{"java", "-jar", "Foo.jar"}, - envs: map[string]string{"DD_SERVICE": "howdy"}, - expectedServiceTag: "howdy", - fromDDService: true, + name: "DD_SERVICE_set_manually", + cmdline: []string{"java", "-jar", "Foo.jar"}, + envs: map[string]string{"DD_SERVICE": "howdy"}, + expectedDDService: "howdy", + expectedGeneratedName: "Foo", }, { - name: "DD_SERVICE_set_manually_tags", - cmdline: []string{"java", "-jar", "Foo.jar"}, - envs: map[string]string{"DD_TAGS": "service:howdy"}, - expectedServiceTag: "howdy", - fromDDService: true, + name: "DD_SERVICE_set_manually_tags", + cmdline: []string{"java", "-jar", "Foo.jar"}, + envs: map[string]string{"DD_TAGS": "service:howdy"}, + expectedDDService: "howdy", + expectedGeneratedName: "Foo", }, { - name: "DD_SERVICE_set_manually_injection", - cmdline: []string{"java", "-jar", "Foo.jar"}, - envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"}, - expectedServiceTag: "howdy", - fromDDService: false, + name: "DD_SERVICE_set_manually_injection", + cmdline: []string{"java", "-jar", "Foo.jar"}, + envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"}, + expectedDDService: "howdy", + expectedGeneratedName: "Foo", + ddServiceInjected: true, }, { name: "gunicorn simple", @@ -468,7 +481,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--workers=2", "test:app", }, - expectedServiceTag: "test", + expectedGeneratedName: "test", }, { name: "gunicorn from name", @@ -481,7 +494,7 @@ func TestExtractServiceMetadata(t *testing.T) { "dummy", "test:app", }, - expectedServiceTag: "dummy", + expectedGeneratedName: "dummy", }, { name: "gunicorn from name (long arg)", @@ -493,7 +506,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--name=dummy", "test:app", }, - expectedServiceTag: "dummy", + expectedGeneratedName: "dummy", }, { name: "gunicorn from name in env", @@ -501,16 +514,16 @@ func TestExtractServiceMetadata(t *testing.T) { "gunicorn", "test:app", }, - envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3 -n dummy"}, - expectedServiceTag: "dummy", + envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3 -n dummy"}, + expectedGeneratedName: "dummy", }, { name: "gunicorn without app found", cmdline: []string{ "gunicorn", }, - envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3"}, - expectedServiceTag: "gunicorn", + envs: map[string]string{"GUNICORN_CMD_ARGS": "--bind=127.0.0.1:8080 --workers=3"}, + expectedGeneratedName: "gunicorn", }, { name: "gunicorn with partial wsgi app", @@ -518,7 +531,7 @@ func TestExtractServiceMetadata(t *testing.T) { "gunicorn", "my.package", }, - expectedServiceTag: "my.package", + expectedGeneratedName: "my.package", }, { name: "gunicorn with empty WSGI_APP env", @@ -526,16 +539,16 @@ func TestExtractServiceMetadata(t *testing.T) { "gunicorn", "my.package", }, - envs: map[string]string{"WSGI_APP": ""}, - expectedServiceTag: "my.package", + envs: map[string]string{"WSGI_APP": ""}, + expectedGeneratedName: "my.package", }, { name: "gunicorn with WSGI_APP env", cmdline: []string{ "gunicorn", }, - envs: map[string]string{"WSGI_APP": "test:app"}, - 
expectedServiceTag: "test", + envs: map[string]string{"WSGI_APP": "test:app"}, + expectedGeneratedName: "test", }, { name: "gunicorn with replaced cmdline with colon", @@ -544,7 +557,7 @@ func TestExtractServiceMetadata(t *testing.T) { "master", "[domains.foo.apps.bar:create_server()]", }, - expectedServiceTag: "domains.foo.apps.bar", + expectedGeneratedName: "domains.foo.apps.bar", }, { name: "gunicorn with replaced cmdline", @@ -553,7 +566,7 @@ func TestExtractServiceMetadata(t *testing.T) { "master", "[mcservice]", }, - expectedServiceTag: "mcservice", + expectedGeneratedName: "mcservice", }, } @@ -569,13 +582,14 @@ func TestExtractServiceMetadata(t *testing.T) { fs = *tt.fs } meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, make(DetectorContextMap)) - if len(tt.expectedServiceTag) == 0 { + if len(tt.expectedGeneratedName) == 0 && len(tt.expectedDDService) == 0 { require.False(t, ok) } else { require.True(t, ok) - require.Equal(t, tt.expectedServiceTag, meta.Name) + require.Equal(t, tt.expectedDDService, meta.DDService) + require.Equal(t, tt.expectedGeneratedName, meta.Name) require.Equal(t, tt.expectedAdditionalServices, meta.AdditionalNames) - require.Equal(t, tt.fromDDService, meta.FromDDService) + require.Equal(t, tt.ddServiceInjected, meta.DDServiceInjected) } }) } diff --git a/test/fakeintake/aggregator/servicediscoveryAggregator.go b/test/fakeintake/aggregator/servicediscoveryAggregator.go index 3ee0a522e01ec..854f890ea62db 100644 --- a/test/fakeintake/aggregator/servicediscoveryAggregator.go +++ b/test/fakeintake/aggregator/servicediscoveryAggregator.go @@ -20,17 +20,19 @@ type ServiceDiscoveryPayload struct { RequestType string `json:"request_type"` APIVersion string `json:"api_version"` Payload struct { - NamingSchemaVersion string `json:"naming_schema_version"` - ServiceName string `json:"service_name"` - HostName string `json:"host_name"` - Env string `json:"env"` - ServiceLanguage string `json:"service_language"` - ServiceType string `json:"service_type"` - StartTime int64 `json:"start_time"` - LastSeen int64 `json:"last_seen"` - APMInstrumentation string `json:"apm_instrumentation"` - ServiceNameSource string `json:"service_name_source"` - RSSMemory uint64 `json:"rss_memory"` + NamingSchemaVersion string `json:"naming_schema_version"` + ServiceName string `json:"service_name"` + GeneratedServiceName string `json:"generated_service_name"` + DDService string `json:"dd_service,omitempty"` + HostName string `json:"host_name"` + Env string `json:"env"` + ServiceLanguage string `json:"service_language"` + ServiceType string `json:"service_type"` + StartTime int64 `json:"start_time"` + LastSeen int64 `json:"last_seen"` + APMInstrumentation string `json:"apm_instrumentation"` + ServiceNameSource string `json:"service_name_source,omitempty"` + RSSMemory uint64 `json:"rss_memory"` } `json:"payload"` } diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index 2ede187079619..fe0f98a38d142 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ b/test/new-e2e/tests/discovery/linux_test.go @@ -92,28 +92,40 @@ func (s *linuxTestSuite) TestServiceDiscoveryCheck() { found := foundMap["json-server"] if assert.NotNil(c, found) { assert.Equal(c, "none", found.Payload.APMInstrumentation) - assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.Equal(c, "json-server", found.Payload.ServiceName) + assert.Equal(c, "json-server", found.Payload.GeneratedServiceName) + assert.Empty(c, found.Payload.DDService) + 
assert.Empty(c, found.Payload.ServiceNameSource) assert.NotZero(c, found.Payload.RSSMemory) } found = foundMap["node-instrumented"] if assert.NotNil(c, found) { assert.Equal(c, "provided", found.Payload.APMInstrumentation) - assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.Equal(c, "node-instrumented", found.Payload.ServiceName) + assert.Equal(c, "node-instrumented", found.Payload.GeneratedServiceName) + assert.Empty(c, found.Payload.DDService) + assert.Empty(c, found.Payload.ServiceNameSource) assert.NotZero(c, found.Payload.RSSMemory) } - found = foundMap["python.server"] + found = foundMap["python-svc-dd"] if assert.NotNil(c, found) { assert.Equal(c, "none", found.Payload.APMInstrumentation) - assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.Equal(c, "python-svc-dd", found.Payload.ServiceName) + assert.Equal(c, "python.server", found.Payload.GeneratedServiceName) + assert.Equal(c, "python-svc-dd", found.Payload.DDService) + assert.Equal(c, "provided", found.Payload.ServiceNameSource) assert.NotZero(c, found.Payload.RSSMemory) } found = foundMap["python.instrumented"] if assert.NotNil(c, found) { assert.Equal(c, "provided", found.Payload.APMInstrumentation) - assert.Equal(c, "generated", found.Payload.ServiceNameSource) + assert.Equal(c, "python.instrumented", found.Payload.ServiceName) + assert.Equal(c, "python.instrumented", found.Payload.GeneratedServiceName) + assert.Empty(c, found.Payload.DDService) + assert.Empty(c, found.Payload.ServiceNameSource) assert.NotZero(c, found.Payload.RSSMemory) } diff --git a/test/new-e2e/tests/discovery/testdata/provision/provision.sh b/test/new-e2e/tests/discovery/testdata/provision/provision.sh index 9d3c61f06a425..93bb7a0deeec1 100755 --- a/test/new-e2e/tests/discovery/testdata/provision/provision.sh +++ b/test/new-e2e/tests/discovery/testdata/provision/provision.sh @@ -33,6 +33,7 @@ install_systemd_unit () { name=$1 command=$2 port=$3 + extraenv=$4 cat > "/etc/systemd/system/${name}.service" <<- EOM [Unit] @@ -48,6 +49,7 @@ User=root ExecStart=${command} Environment="PORT=${port}" Environment="NODE_VERSION=20" +Environment="${extraenv}" [Install] WantedBy=multi-user.target @@ -55,12 +57,12 @@ EOM } # Node -install_systemd_unit "node-json-server" "$NVM_DIR/nvm-exec npx json-server --port 8084 /home/ubuntu/e2e-test/node/json-server/db.json" "8084" -install_systemd_unit "node-instrumented" "$NVM_DIR/nvm-exec node /home/ubuntu/e2e-test/node/instrumented/server.js" "8085" +install_systemd_unit "node-json-server" "$NVM_DIR/nvm-exec npx json-server --port 8084 /home/ubuntu/e2e-test/node/json-server/db.json" "8084" "" +install_systemd_unit "node-instrumented" "$NVM_DIR/nvm-exec node /home/ubuntu/e2e-test/node/instrumented/server.js" "8085" "" # Python -install_systemd_unit "python-svc" "/usr/bin/python3 /home/ubuntu/e2e-test/python/server.py" "8082" -install_systemd_unit "python-instrumented" "/usr/bin/python3 /home/ubuntu/e2e-test/python/instrumented.py" "8083" +install_systemd_unit "python-svc" "/usr/bin/python3 /home/ubuntu/e2e-test/python/server.py" "8082" "DD_SERVICE=python-svc-dd" +install_systemd_unit "python-instrumented" "/usr/bin/python3 /home/ubuntu/e2e-test/python/instrumented.py" "8083" "" systemctl daemon-reload From 8e16d7ac88b9ad4e884a129681d263a63eede152 Mon Sep 17 00:00:00 2001 From: Rey Abolofia Date: Wed, 4 Sep 2024 19:18:58 +0200 Subject: [PATCH 009/128] Dynamic binary size test comment message based on size change. 
(#28924) --- .github/workflows/serverless-binary-size.yml | 31 ++++++------- .../serverless_perf/write_message.sh | 46 +++++++++++++++++++ 2 files changed, 59 insertions(+), 18 deletions(-) create mode 100755 test/integration/serverless_perf/write_message.sh diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml index 56818495bd7cb..e396e8a78751d 100644 --- a/.github/workflows/serverless-binary-size.yml +++ b/.github/workflows/serverless-binary-size.yml @@ -138,6 +138,18 @@ jobs: name: dependency-graphs path: go/src/github.com/DataDog/datadog-lambda-extension/graphs + - name: Write message + id: write + if: steps.should.outputs.should_run == 'true' + env: + VAR_COLD_START: ${{ steps.compare.outputs.coldstart }} + VAR_DIFF: ${{ steps.compare.outputs.diff }} + VAR_DEPS: ${{ steps.deps.outputs.deps }} + VAR_RUN_ID: ${{ github.run_id }} + run: | + cd go/src/github.com/DataDog/datadog-agent + ./test/integration/serverless_perf/write_message.sh + - name: Post comment uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2.9.0 if: steps.should.outputs.should_run == 'true' @@ -145,21 +157,4 @@ jobs: header: serverless-binary-size hide_and_recreate: true hide_classify: "RESOLVED" - message: | - :warning::rotating_light: Warning, this pull request increases the binary size of serverless extension by ${{ steps.compare.outputs.diff }} bytes. Each MB of binary size increase means about 10ms of additional cold start time, so this pull request would increase cold start time by ${{ steps.compare.outputs.coldstart }}ms. - - If you have questions, we are happy to help, come visit us in the [#serverless](https://dd.slack.com/archives/CBWDFKWV8) slack channel and provide a link to this comment. - -
- Debug info - - These dependencies were added to the serverless extension by this pull request: - - ``` - ${{ steps.deps.outputs.deps }} - ``` - - View dependency graphs for each added dependency in the [artifacts section](https://github.com/DataDog/datadog-agent/actions/runs/${{ github.run_id }}#artifacts) of the github action. - - We suggest you consider adding the `!serverless` build tag to remove any new dependencies not needed in the serverless extension. -
+ path: ${{ steps.write.outputs.filename }} diff --git a/test/integration/serverless_perf/write_message.sh b/test/integration/serverless_perf/write_message.sh new file mode 100755 index 0000000000000..557426e9ac7c3 --- /dev/null +++ b/test/integration/serverless_perf/write_message.sh @@ -0,0 +1,46 @@ +#!/bin/bash -e + +filename="${HOME}/comment.md" +echo "filename=$filename" >> "$GITHUB_OUTPUT" + +if [ "${VAR_COLD_START}" != 0 ]; then + echo -n ":warning::rotating_light: Warning, " > "$filename" +else + echo -n ":inbox_tray: :loudspeaker: Info, " > "$filename" +fi + +cat >> "$filename" << EOL +this pull request increases the binary size of serverless extension by ${VAR_DIFF} bytes. Each MB of binary size increase means about 10ms of additional cold start time, so this pull request would increase cold start time by ${VAR_COLD_START}ms. + +
+Debug info + +If you have questions, we are happy to help, come visit us in the [#serverless](https://dd.slack.com/archives/CBWDFKWV8) slack channel and provide a link to this comment. + +EOL + +if [ -n "$VAR_DEPS" ]; then + cat >> "$filename" << EOL + +These dependencies were added to the serverless extension by this pull request: + +\`\`\` +${VAR_DEPS} +\`\`\` + +View dependency graphs for each added dependency in the [artifacts section](https://github.com/DataDog/datadog-agent/actions/runs/${VAR_RUN_ID}#artifacts) of the github action. + +EOL +fi + +cat >> "$filename" << EOL + +We suggest you consider adding the \`!serverless\` build tag to remove any new dependencies not needed in the serverless extension. + +
+ +EOL + +echo "Will post comment with message:" +echo +cat "$filename" From 4b1ad02123a02d0ba6e65ad65918a12f6207f7d6 Mon Sep 17 00:00:00 2001 From: "John L. Peterson (Jack)" Date: Wed, 4 Sep 2024 13:41:18 -0400 Subject: [PATCH 010/128] convert pkg/util/tagger and comp/core/tagger/types to module (#29008) --- comp/core/tagger/types/go.mod | 88 +++++++++ comp/core/tagger/types/go.sum | 352 ++++++++++++++++++++++++++++++++++ go.mod | 36 ++-- pkg/util/tagger/go.mod | 78 ++++++++ pkg/util/tagger/go.sum | 352 ++++++++++++++++++++++++++++++++++ pkg/util/tagger/tagger.go | 4 +- tasks/modules.py | 2 + 7 files changed, 894 insertions(+), 18 deletions(-) create mode 100644 comp/core/tagger/types/go.mod create mode 100644 comp/core/tagger/types/go.sum create mode 100644 pkg/util/tagger/go.mod create mode 100644 pkg/util/tagger/go.sum diff --git a/comp/core/tagger/types/go.mod b/comp/core/tagger/types/go.mod new file mode 100644 index 0000000000000..5abfecabc88c2 --- /dev/null +++ b/comp/core/tagger/types/go.mod @@ -0,0 +1,88 @@ +module github.com/DataDog/datadog-agent/comp/core/tagger/types + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../secrets + github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../utils + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../pkg/util/tagger + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 + github.com/stretchr/testify v1.9.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect + 
github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/comp/core/tagger/types/go.sum b/comp/core/tagger/types/go.sum new file mode 100644 index 0000000000000..77ba213060c82 --- /dev/null +++ b/comp/core/tagger/types/go.sum @@ -0,0 +1,352 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod 
h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= 
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod 
h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 
h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/go.mod b/go.mod index 0117a492a0912..4d5c0ef3cdb58 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/secrets => ./comp/core/secrets github.com/DataDog/datadog-agent/comp/core/status => ./comp/core/status github.com/DataDog/datadog-agent/comp/core/status/statusimpl => ./comp/core/status/statusimpl + github.com/DataDog/datadog-agent/comp/core/tagger/types => ./comp/core/tagger/types github.com/DataDog/datadog-agent/comp/core/tagger/utils => ./comp/core/tagger/utils github.com/DataDog/datadog-agent/comp/core/telemetry => ./comp/core/telemetry/ github.com/DataDog/datadog-agent/comp/def => ./comp/def/ @@ -129,6 +130,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/statstracker => ./pkg/util/statstracker github.com/DataDog/datadog-agent/pkg/util/system => ./pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket => ./pkg/util/system/socket/ + github.com/DataDog/datadog-agent/pkg/util/tagger => ./pkg/util/tagger github.com/DataDog/datadog-agent/pkg/util/testutil => ./pkg/util/testutil github.com/DataDog/datadog-agent/pkg/util/uuid => ./pkg/util/uuid github.com/DataDog/datadog-agent/pkg/util/winutil => ./pkg/util/winutil/ @@ -147,9 +149,9 @@ require ( github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 github.com/DataDog/datadog-go/v5 v5.5.0 github.com/DataDog/datadog-operator v1.8.0-rc.1 github.com/DataDog/ebpf-manager v0.7.1 @@ -594,7 +596,7 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.0.0-00010101000000-000000000000 + 
github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000 github.com/containerd/containerd/api v1.7.19 @@ -616,9 +618,10 @@ require ( github.com/DataDog/datadog-agent/comp/core/log/impl v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/log/impl-trace v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.2 github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 @@ -645,12 +648,12 @@ require ( github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 github.com/DataDog/datadog-agent/pkg/config/remote v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 @@ -680,22 +683,23 @@ require ( github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0 - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 + 
github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/DataDog/go-libddwaf/v3 v3.3.0 github.com/DataDog/go-sqllexer v0.0.13 @@ -765,7 +769,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect diff --git a/pkg/util/tagger/go.mod b/pkg/util/tagger/go.mod new file mode 100644 index 0000000000000..0d482a50ec691 --- /dev/null +++ b/pkg/util/tagger/go.mod @@ -0,0 +1,78 @@ +module github.com/DataDog/datadog-agent/pkg/util/tagger + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil +) + +require github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/pkg/util/tagger/go.sum b/pkg/util/tagger/go.sum new file mode 100644 index 0000000000000..77ba213060c82 --- /dev/null +++ b/pkg/util/tagger/go.sum @@ -0,0 +1,352 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod 
h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 
h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= 
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/util/tagger/tagger.go b/pkg/util/tagger/tagger.go index 61615355e9747..018c9052b8063 100644 --- a/pkg/util/tagger/tagger.go +++ b/pkg/util/tagger/tagger.go @@ -6,11 +6,11 @@ // Package tagger provides function to check if the tagger should use composite entity id and object store package tagger -import "github.com/DataDog/datadog-agent/pkg/config" +import pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" // ShouldUseCompositeStore indicates whether the tagger should use the default or composite implementation // of entity ID and object store. // TODO: remove this when we switch over fully to the composite implementation func ShouldUseCompositeStore() bool { - return config.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") + return pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") } diff --git a/tasks/modules.py b/tasks/modules.py index 39cbb81e14242..ebed764f57eaf 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -147,6 +147,7 @@ def dependency_path(self, agent_version): "comp/core/secrets": GoModule("comp/core/secrets", independent=True, used_by_otel=True), "comp/core/status": GoModule("comp/core/status", independent=True, used_by_otel=True), "comp/core/status/statusimpl": GoModule("comp/core/status/statusimpl", independent=True), + "comp/core/tagger/types": GoModule("comp/core/tagger/types", independent=True, used_by_otel=True), "comp/core/tagger/utils": GoModule("comp/core/tagger/utils", independent=True, used_by_otel=True), "comp/core/telemetry": GoModule("comp/core/telemetry", independent=True, used_by_otel=True), "comp/def": GoModule("comp/def", independent=True, used_by_otel=True), @@ -275,6 +276,7 @@ def dependency_path(self, agent_version): "pkg/util/statstracker": GoModule("pkg/util/statstracker", independent=True, used_by_otel=True), "pkg/util/system": GoModule("pkg/util/system", independent=True, used_by_otel=True), "pkg/util/system/socket": GoModule("pkg/util/system/socket", independent=True, used_by_otel=True), + "pkg/util/tagger": GoModule("pkg/util/tagger", independent=True, used_by_otel=True), "pkg/util/testutil": GoModule("pkg/util/testutil", independent=True, used_by_otel=True), "pkg/util/uuid": GoModule("pkg/util/uuid", independent=True), "pkg/util/winutil": GoModule("pkg/util/winutil", independent=True, used_by_otel=True), From d89c5c44d6de36137772d80dd97d18bf0f076cfc Mon Sep 17 00:00:00 
2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Wed, 4 Sep 2024 20:49:10 +0200 Subject: [PATCH 011/128] Revert "service discovery: Add check yaml by default" (#29058) --- .../dist/conf.d/service_discovery.d/conf.yaml.default | 2 -- omnibus/config/software/datadog-agent-finalize.rb | 3 --- tasks/agent.py | 1 - .../agent-subcommands/configcheck/configcheck_nix_test.go | 8 +------- 4 files changed, 1 insertion(+), 13 deletions(-) delete mode 100644 cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default diff --git a/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default deleted file mode 100644 index 00d9a2dbba2c8..0000000000000 --- a/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default +++ /dev/null @@ -1,2 +0,0 @@ -instances: - - {} diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index 2d545f58b498b..b6df0ab46e821 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -52,9 +52,6 @@ # load isn't supported by windows delete "#{conf_dir}/load.d" - # service_discovery isn't supported by windows - delete "#{conf_dir}/service_discovery.d" - # Remove .pyc files from embedded Python command "del /q /s #{windows_safe_path(install_dir)}\\*.pyc" end diff --git a/tasks/agent.py b/tasks/agent.py index 8474176fc244f..c92f5d8399782 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -76,7 +76,6 @@ "orchestrator_ecs", "cisco_sdwan", "network_path", - "service_discovery", ] WINDOWS_CORECHECKS = [ diff --git a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go index 6f6e744e09bcb..74562bfff59eb 100644 --- a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go @@ -25,7 +25,7 @@ func TestLinuxConfigCheckSuite(t *testing.T) { e2e.Run(t, &linuxConfigCheckSuite{}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake())) } -// cpu, disk, file_handle, io, load, memory, network, ntp, uptime, service_discovery +// cpu, disk, file_handle, io, load, memory, network, ntp, uptime func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { testChecks := []CheckConfigOutput{ { @@ -82,12 +82,6 @@ func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { InstanceID: "uptime:", Settings: "{}", }, - { - CheckName: "service_discovery", - Filepath: "file:/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml.default", - InstanceID: "service_discovery:", - Settings: "{}", - }, } output := v.Env().Agent.Client.ConfigCheck() From 4c8218dcec1c0da0af493097c76e2a262a047be8 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 4 Sep 2024 21:53:17 +0200 Subject: [PATCH 012/128] [CWS] fix small possible nil-ptr deref regarding profiled containers telemetry (#29059) --- pkg/security/agent/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index c582e4a777763..7251fe7dff99e 100644 --- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -190,7 +190,7 @@ func (rsa *RuntimeSecurityAgent) DispatchActivityDump(msg *api.ActivityDumpStrea log.Errorf("%v", err) return } - if rsa.telemetry != nil { + if rsa.profContainersTelemetry != nil { // register for telemetry for this container imageName, imageTag := 
dump.GetImageNameTag() rsa.profContainersTelemetry.registerProfiledContainer(imageName, imageTag) From 7ebe188e1eed1a117d60837f58d50aec74de26fa Mon Sep 17 00:00:00 2001 From: Ken Schneider <103530259+ken-schneider@users.noreply.github.com> Date: Wed, 4 Sep 2024 16:30:32 -0400 Subject: [PATCH 013/128] [NETPATH-294] Update Network Path timeout logic between agent and system-probe (#28685) --- .../conf.d/network_path.d/conf.yaml.example | 12 +++ cmd/system-probe/modules/traceroute.go | 12 +-- cmd/system-probe/modules/traceroute_test.go | 2 +- .../npcollector/npcollectorimpl/config.go | 5 ++ .../npcollectorimpl/config_test.go | 21 +++++ .../npcollectorimpl/npcollector.go | 8 +- .../corechecks/networkpath/config.go | 22 +++-- .../corechecks/networkpath/config_test.go | 89 +++++++++++++++++++ .../corechecks/networkpath/networkpath.go | 2 +- pkg/config/setup/config.go | 5 ++ pkg/config/setup/config_test.go | 16 ++++ pkg/networkpath/traceroute/runner.go | 28 +++--- pkg/networkpath/traceroute/runner_test.go | 3 +- pkg/networkpath/traceroute/traceroute.go | 3 +- .../traceroute/traceroute_linux.go | 2 +- .../traceroute/traceroute_windows.go | 2 +- pkg/process/net/common.go | 27 +++--- 17 files changed, 214 insertions(+), 45 deletions(-) create mode 100644 comp/networkpath/npcollector/npcollectorimpl/config_test.go diff --git a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example index 6a1a287f9d267..233f284d2576d 100644 --- a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example @@ -5,6 +5,12 @@ init_config: # # min_collection_interval: 60 + ## @param timeout - integer - optional - default: 10000 + ## Specifies how much time the full traceroute should take + ## in milliseconds + # + # timeout: 10000 + # Network Path integration is used to monitor individual endpoints. # Supported platforms are Linux and Windows. macOS is not supported yet. instances: @@ -30,6 +36,12 @@ instances: # # max_ttl: + ## @param timeout - integer - optional - default: 10000 + ## Specifies how much time the full traceroute should take + ## in milliseconds + # + # timeout: 10000 + ## @param min_collection_interval - number - optional - default: 60 ## Specifies how frequently we should probe the endpoint. ## Min collection interval is defined in seconds. 
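
The two `timeout` parameters documented in the conf.yaml.example hunk above (instance-level and `init_config`) are both expressed in milliseconds, with the instance value taking precedence and 10000 ms as the documented default. As a reading aid only, the short self-contained Go sketch below illustrates that precedence and the millisecond-to-`time.Duration` conversion that the remaining hunks of this patch perform; the names `resolveTimeout` and `defaultTimeoutMs` are invented for the sketch and are not identifiers from the agent code.

package main

import (
	"fmt"
	"time"
)

// defaultTimeoutMs mirrors the documented 10000 ms default; illustrative only.
const defaultTimeoutMs int64 = 10000

// resolveTimeout sketches the precedence described above: the instance-level
// timeout wins over init_config, which wins over the default. All values are
// interpreted as milliseconds, matching the conf.yaml.example comments.
func resolveTimeout(instanceMs, initMs int64) time.Duration {
	for _, ms := range []int64{instanceMs, initMs, defaultTimeoutMs} {
		if ms > 0 {
			return time.Duration(ms) * time.Millisecond
		}
	}
	// Defensive fallback; unreachable while defaultTimeoutMs is positive.
	return time.Duration(defaultTimeoutMs) * time.Millisecond
}

func main() {
	fmt.Println(resolveTimeout(0, 70000))     // only init_config set -> 1m10s
	fmt.Println(resolveTimeout(50000, 70000)) // instance overrides init_config -> 50s
}
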
diff --git a/cmd/system-probe/modules/traceroute.go b/cmd/system-probe/modules/traceroute.go index 8314fe4dce396..320106472c087 100644 --- a/cmd/system-probe/modules/traceroute.go +++ b/cmd/system-probe/modules/traceroute.go @@ -85,7 +85,7 @@ func (t *traceroute) Register(httpMux *module.Router) error { } runCount := runCounter.Inc() - logTracerouteRequests(cfg.DestHostname, id, runCount, start) + logTracerouteRequests(cfg, id, runCount, start) }) return nil @@ -97,9 +97,9 @@ func (t *traceroute) RegisterGRPC(_ grpc.ServiceRegistrar) error { func (t *traceroute) Close() {} -func logTracerouteRequests(host string, client string, runCount uint64, start time.Time) { - args := []interface{}{host, client, runCount, time.Since(start)} - msg := "Got request on /traceroute/%s?client_id=%s (count: %d): retrieved traceroute in %s" +func logTracerouteRequests(cfg tracerouteutil.Config, client string, runCount uint64, start time.Time) { + args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, runCount, time.Since(start)} + msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d (count: %d): retrieved traceroute in %s" switch { case runCount <= 5, runCount%20 == 0: log.Infof(msg, args...) @@ -119,7 +119,7 @@ func parseParams(req *http.Request) (tracerouteutil.Config, error) { if err != nil { return tracerouteutil.Config{}, fmt.Errorf("invalid max_ttl: %s", err) } - timeout, err := parseUint(req, "timeout", 32) + timeout, err := parseUint(req, "timeout", 64) if err != nil { return tracerouteutil.Config{}, fmt.Errorf("invalid timeout: %s", err) } @@ -129,7 +129,7 @@ func parseParams(req *http.Request) (tracerouteutil.Config, error) { DestHostname: host, DestPort: uint16(port), MaxTTL: uint8(maxTTL), - TimeoutMs: uint(timeout), + Timeout: time.Duration(timeout), Protocol: payload.Protocol(protocol), }, nil } diff --git a/cmd/system-probe/modules/traceroute_test.go b/cmd/system-probe/modules/traceroute_test.go index fcfc929234ddf..31451a241c70b 100644 --- a/cmd/system-probe/modules/traceroute_test.go +++ b/cmd/system-probe/modules/traceroute_test.go @@ -46,7 +46,7 @@ func TestParseParams(t *testing.T) { DestHostname: "1.2.3.4", DestPort: 42, MaxTTL: 35, - TimeoutMs: 1000, + Timeout: 1000, }, }, } diff --git a/comp/networkpath/npcollector/npcollectorimpl/config.go b/comp/networkpath/npcollector/npcollectorimpl/config.go index c752153db7747..3a91df33ad08d 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/config.go +++ b/comp/networkpath/npcollector/npcollectorimpl/config.go @@ -14,6 +14,8 @@ import ( type collectorConfigs struct { connectionsMonitoringEnabled bool workers int + timeout time.Duration + maxTTL int pathtestInputChanSize int pathtestProcessingChanSize int pathtestContextsLimit int @@ -24,9 +26,12 @@ type collectorConfigs struct { } func newConfig(agentConfig config.Component) *collectorConfigs { + return &collectorConfigs{ connectionsMonitoringEnabled: agentConfig.GetBool("network_path.connections_monitoring.enabled"), workers: agentConfig.GetInt("network_path.collector.workers"), + timeout: agentConfig.GetDuration("network_path.collector.timeout") * time.Millisecond, + maxTTL: agentConfig.GetInt("network_path.collector.max_ttl"), pathtestInputChanSize: agentConfig.GetInt("network_path.collector.input_chan_size"), pathtestProcessingChanSize: agentConfig.GetInt("network_path.collector.processing_chan_size"), pathtestContextsLimit: agentConfig.GetInt("network_path.collector.pathtest_contexts_limit"), diff --git 
a/comp/networkpath/npcollector/npcollectorimpl/config_test.go b/comp/networkpath/npcollector/npcollectorimpl/config_test.go new file mode 100644 index 0000000000000..3be402965e8aa --- /dev/null +++ b/comp/networkpath/npcollector/npcollectorimpl/config_test.go @@ -0,0 +1,21 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. +package npcollectorimpl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNetworkPathCollectorEnabled(t *testing.T) { + config := &collectorConfigs{ + connectionsMonitoringEnabled: true, + } + assert.True(t, config.networkPathCollectorEnabled()) + + config.connectionsMonitoringEnabled = false + assert.False(t, config.networkPathCollectorEnabled()) +} diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go index e35cdc9d05c51..f65bce267ee70 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go @@ -75,8 +75,10 @@ func newNoopNpCollectorImpl() *npCollectorImpl { } func newNpCollectorImpl(epForwarder eventplatform.Forwarder, collectorConfigs *collectorConfigs, logger log.Component, telemetrycomp telemetryComp.Component) *npCollectorImpl { - logger.Infof("New NpCollector (workers=%d input_chan_size=%d processing_chan_size=%d pathtest_contexts_limit=%d pathtest_ttl=%s pathtest_interval=%s flush_interval=%s)", + logger.Infof("New NpCollector (workers=%d timeout=%d max_ttl=%d input_chan_size=%d processing_chan_size=%d pathtest_contexts_limit=%d pathtest_ttl=%s pathtest_interval=%s flush_interval=%s)", collectorConfigs.workers, + collectorConfigs.timeout, + collectorConfigs.maxTTL, collectorConfigs.pathtestInputChanSize, collectorConfigs.pathtestProcessingChanSize, collectorConfigs.pathtestContextsLimit, @@ -210,8 +212,8 @@ func (s *npCollectorImpl) runTracerouteForPath(ptest *pathteststore.PathtestCont cfg := traceroute.Config{ DestHostname: ptest.Pathtest.Hostname, DestPort: ptest.Pathtest.Port, - MaxTTL: 0, // TODO: make it configurable, setting 0 to use default value for now - TimeoutMs: 0, // TODO: make it configurable, setting 0 to use default value for now + MaxTTL: uint8(s.collectorConfigs.maxTTL), + Timeout: s.collectorConfigs.timeout, Protocol: ptest.Pathtest.Protocol, } diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go index 5efb936eb0db2..c7b57f6924a6a 100644 --- a/pkg/collector/corechecks/networkpath/config.go +++ b/pkg/collector/corechecks/networkpath/config.go @@ -12,15 +12,19 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" coreconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "gopkg.in/yaml.v2" ) -const defaultCheckInterval time.Duration = 1 * time.Minute +const ( + defaultCheckInterval time.Duration = 1 * time.Minute +) // InitConfig is used to deserialize integration init config type InitConfig struct { - MinCollectionInterval int `yaml:"min_collection_interval"` + MinCollectionInterval int64 `yaml:"min_collection_interval"` + TimeoutMs int64 `yaml:"timeout"` } // InstanceConfig is used to deserialize integration instance config @@ -36,7 +40,7 @@ type InstanceConfig 
struct { MaxTTL uint8 `yaml:"max_ttl"` - TimeoutMs uint `yaml:"timeout"` // millisecond + TimeoutMs int64 `yaml:"timeout"` MinCollectionInterval int `yaml:"min_collection_interval"` @@ -52,7 +56,7 @@ type CheckConfig struct { DestinationService string MaxTTL uint8 Protocol payload.Protocol - TimeoutMs uint + Timeout time.Duration MinCollectionInterval time.Duration Tags []string Namespace string @@ -80,7 +84,6 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data c.SourceService = instance.SourceService c.DestinationService = instance.DestinationService c.MaxTTL = instance.MaxTTL - c.TimeoutMs = instance.TimeoutMs c.Protocol = payload.Protocol(strings.ToUpper(instance.Protocol)) c.MinCollectionInterval = firstNonZero( @@ -92,6 +95,15 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data return nil, fmt.Errorf("min collection interval must be > 0") } + c.Timeout = firstNonZero( + time.Duration(instance.TimeoutMs)*time.Millisecond, + time.Duration(initConfig.TimeoutMs)*time.Millisecond, + setup.DefaultNetworkPathTimeout*time.Millisecond, + ) + if c.Timeout <= 0 { + return nil, fmt.Errorf("timeout must be > 0") + } + c.Tags = instance.Tags c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace") diff --git a/pkg/collector/corechecks/networkpath/config_test.go b/pkg/collector/corechecks/networkpath/config_test.go index 6d058f2c5f0ec..e34be2bb11cb9 100644 --- a/pkg/collector/corechecks/networkpath/config_test.go +++ b/pkg/collector/corechecks/networkpath/config_test.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" coreconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/stretchr/testify/assert" ) @@ -34,6 +35,7 @@ hostname: 1.2.3.4 DestHostname: "1.2.3.4", MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -68,6 +70,7 @@ min_collection_interval: 10 DestHostname: "1.2.3.4", MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -82,6 +85,7 @@ min_collection_interval: 10 DestHostname: "1.2.3.4", MinCollectionInterval: time.Duration(10) * time.Second, Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -93,6 +97,7 @@ hostname: 1.2.3.4 DestHostname: "1.2.3.4", MinCollectionInterval: time.Duration(1) * time.Minute, Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -109,6 +114,7 @@ destination_service: service-b DestinationService: "service-b", MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -123,6 +129,7 @@ protocol: udp MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Protocol: payload.ProtocolUDP, + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -137,6 +144,7 @@ protocol: UDP MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Protocol: payload.ProtocolUDP, + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, { @@ -151,8 +159,89 @@ protocol: TCP MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", 
Protocol: payload.ProtocolTCP, + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, }, }, + { + name: "timeout from instance config", + rawInstance: []byte(` +hostname: 1.2.3.4 +timeout: 50000 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: 50000 * time.Millisecond, + }, + }, + { + name: "timeout from instance config preferred over init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +timeout: 50000 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +timeout: 70000 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: 50000 * time.Millisecond, + }, + }, + { + name: "timeout from init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +timeout: 70000 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: 70000 * time.Millisecond, + }, + }, + { + name: "default timeout", + rawInstance: []byte(` +hostname: 1.2.3.4 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + }, + }, + { + name: "negative timeout returns an error", + rawInstance: []byte(` +hostname: 1.2.3.4 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +timeout: -1 +`), + expectedError: "timeout must be > 0", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go index b1e9b735e1eab..e7ba9465f268b 100644 --- a/pkg/collector/corechecks/networkpath/networkpath.go +++ b/pkg/collector/corechecks/networkpath/networkpath.go @@ -54,7 +54,7 @@ func (c *Check) Run() error { DestHostname: c.config.DestHostname, DestPort: c.config.DestPort, MaxTTL: c.config.MaxTTL, - TimeoutMs: c.config.TimeoutMs, + Timeout: c.config.Timeout, Protocol: c.config.Protocol, } diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 2cf2a4bdfb4b1..44c3efd418475 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -103,6 +103,9 @@ const ( // DefaultMaxMessageSizeBytes is the default value for max_message_size_bytes // If a log message is larger than this byte limit, the overflow bytes will be truncated. 
DefaultMaxMessageSizeBytes = 256 * 1000 + + // DefaultNetworkPathTimeout defines the default timeout for a network path test + DefaultNetworkPathTimeout = 10000 ) // datadog is the global configuration object @@ -433,6 +436,8 @@ func InitConfig(config pkgconfigmodel.Config) { // Network Path config.BindEnvAndSetDefault("network_path.connections_monitoring.enabled", false) config.BindEnvAndSetDefault("network_path.collector.workers", 4) + config.BindEnvAndSetDefault("network_path.collector.timeout", DefaultNetworkPathTimeout) + config.BindEnvAndSetDefault("network_path.collector.max_ttl", 30) config.BindEnvAndSetDefault("network_path.collector.input_chan_size", 1000) config.BindEnvAndSetDefault("network_path.collector.processing_chan_size", 1000) config.BindEnvAndSetDefault("network_path.collector.pathtest_contexts_limit", 10000) diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go index 3598e7b8cd5c7..d2b59e285c2a0 100644 --- a/pkg/config/setup/config_test.go +++ b/pkg/config/setup/config_test.go @@ -659,6 +659,22 @@ network_devices: assert.Equal(t, "dev", config.GetString("network_devices.namespace")) } +func TestNetworkPathDefaults(t *testing.T) { + datadogYaml := "" + config := confFromYAML(t, datadogYaml) + + assert.Equal(t, false, config.GetBool("network_path.connections_monitoring.enabled")) + assert.Equal(t, 4, config.GetInt("network_path.collector.workers")) + assert.Equal(t, 10000, config.GetInt("network_path.collector.timeout")) + assert.Equal(t, 30, config.GetInt("network_path.collector.max_ttl")) + assert.Equal(t, 1000, config.GetInt("network_path.collector.input_chan_size")) + assert.Equal(t, 1000, config.GetInt("network_path.collector.processing_chan_size")) + assert.Equal(t, 10000, config.GetInt("network_path.collector.pathtest_contexts_limit")) + assert.Equal(t, 15*time.Minute, config.GetDuration("network_path.collector.pathtest_ttl")) + assert.Equal(t, 5*time.Minute, config.GetDuration("network_path.collector.pathtest_interval")) + assert.Equal(t, 10*time.Second, config.GetDuration("network_path.collector.flush_interval")) +} + func TestUsePodmanLogsAndDockerPathOverride(t *testing.T) { // If use_podman_logs is true and docker_path_override is set, the config should return an error datadogYaml := ` diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go index 940ea9a7344e7..cf57bb40f73d0 100644 --- a/pkg/networkpath/traceroute/runner.go +++ b/pkg/networkpath/traceroute/runner.go @@ -20,6 +20,7 @@ import ( "github.com/vishvananda/netns" telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" + "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/tcp" @@ -32,15 +33,20 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// TODO: are these good defaults? 
const ( - DefaultSourcePort = 12345 - DefaultDestPort = 33434 - DefaultNumPaths = 1 - DefaultMinTTL = 1 - DefaultMaxTTL = 30 - DefaultDelay = 50 //msec - DefaultReadTimeout = 10 * time.Second + // DefaultSourcePort defines the default source port + DefaultSourcePort = 12345 + // DefaultDestPort defines the default destination port + DefaultDestPort = 33434 + // DefaultNumPaths defines the default number of paths + DefaultNumPaths = 1 + // DefaultMinTTL defines the default minimum TTL + DefaultMinTTL = 1 + // DefaultMaxTTL defines the default maximum TTL + DefaultMaxTTL = 30 + // DefaultDelay defines the default delay + DefaultDelay = 50 //msec + // DefaultOutputFormat defines the default output format DefaultOutputFormat = "json" tracerouteRunnerModuleName = "traceroute_runner__" @@ -115,10 +121,10 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.Network } var timeout time.Duration - if cfg.TimeoutMs == 0 { - timeout = DefaultReadTimeout + if cfg.Timeout == 0 { + timeout = setup.DefaultNetworkPathTimeout * time.Millisecond } else { - timeout = time.Duration(cfg.TimeoutMs) * time.Millisecond + timeout = cfg.Timeout } hname, err := hostname.Get(ctx) diff --git a/pkg/networkpath/traceroute/runner_test.go b/pkg/networkpath/traceroute/runner_test.go index c3ef189cca095..86f7b99f9c68a 100644 --- a/pkg/networkpath/traceroute/runner_test.go +++ b/pkg/networkpath/traceroute/runner_test.go @@ -6,8 +6,9 @@ package traceroute import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestGetPorts(t *testing.T) { diff --git a/pkg/networkpath/traceroute/traceroute.go b/pkg/networkpath/traceroute/traceroute.go index e23ac10ca64f3..5fb757fc88f71 100644 --- a/pkg/networkpath/traceroute/traceroute.go +++ b/pkg/networkpath/traceroute/traceroute.go @@ -8,6 +8,7 @@ package traceroute import ( "context" + "time" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" ) @@ -30,7 +31,7 @@ type ( // Max number of hops to try MaxTTL uint8 // TODO: do we want to expose this? 
- TimeoutMs uint + Timeout time.Duration // Protocol is the protocol to use // for traceroute, default is UDP Protocol payload.Protocol diff --git a/pkg/networkpath/traceroute/traceroute_linux.go b/pkg/networkpath/traceroute/traceroute_linux.go index dd97f341c0a73..1e4e0f10321cb 100644 --- a/pkg/networkpath/traceroute/traceroute_linux.go +++ b/pkg/networkpath/traceroute/traceroute_linux.go @@ -47,7 +47,7 @@ func (l *LinuxTraceroute) Run(_ context.Context) (payload.NetworkPath, error) { return payload.NetworkPath{}, err } - resp, err := tu.GetTraceroute(clientID, l.cfg.DestHostname, l.cfg.DestPort, l.cfg.Protocol, l.cfg.MaxTTL, l.cfg.TimeoutMs) + resp, err := tu.GetTraceroute(clientID, l.cfg.DestHostname, l.cfg.DestPort, l.cfg.Protocol, l.cfg.MaxTTL, l.cfg.Timeout) if err != nil { return payload.NetworkPath{}, err } diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go index d84089ca5a752..f6e1702121b3b 100644 --- a/pkg/networkpath/traceroute/traceroute_windows.go +++ b/pkg/networkpath/traceroute/traceroute_windows.go @@ -46,7 +46,7 @@ func (w *WindowsTraceroute) Run(_ context.Context) (payload.NetworkPath, error) log.Warnf("could not initialize system-probe connection: %s", err.Error()) return payload.NetworkPath{}, err } - resp, err := tu.GetTraceroute(clientID, w.cfg.DestHostname, w.cfg.DestPort, w.cfg.Protocol, w.cfg.MaxTTL, w.cfg.TimeoutMs) + resp, err := tu.GetTraceroute(clientID, w.cfg.DestHostname, w.cfg.DestPort, w.cfg.Protocol, w.cfg.MaxTTL, w.cfg.Timeout) if err != nil { return payload.NetworkPath{}, err } diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go index 4ccf712cbd804..a9b7a64430143 100644 --- a/pkg/process/net/common.go +++ b/pkg/process/net/common.go @@ -58,10 +58,10 @@ type RemoteSysProbeUtil struct { // Retrier used to setup system probe initRetry retry.Retrier - path string - httpClient http.Client - pprofClient http.Client - extendedTimeoutClient http.Client + path string + httpClient http.Client + pprofClient http.Client + tracerouteClient http.Client } // GetRemoteSystemProbeUtil returns a ready to use RemoteSysProbeUtil. It is backed by a shared singleton. 
@@ -199,14 +199,17 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in } // GetTraceroute returns the results of a traceroute to a host -func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout uint) ([]byte, error) { - req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil) +func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout time.Duration) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout+10*time.Second) // allow extra time for the system probe communication overhead + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil) if err != nil { return nil, err } req.Header.Set("Accept", "application/json") - resp, err := r.extendedTimeoutClient.Do(req) + resp, err := r.tracerouteClient.Do(req) if err != nil { return nil, err } @@ -303,17 +306,13 @@ func newSystemProbe(path string) *RemoteSysProbeUtil { }, }, }, - extendedTimeoutClient: http.Client{ - Timeout: 60 * time.Second, + tracerouteClient: http.Client{ + // no timeout set here, the expected usage of this client + // is that the caller will set a timeout on each request Transport: &http.Transport{ - MaxIdleConns: 2, - IdleConnTimeout: 30 * time.Second, DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial(netType, path) }, - TLSHandshakeTimeout: 1 * time.Second, - ResponseHeaderTimeout: 50 * time.Second, - ExpectContinueTimeout: 50 * time.Millisecond, }, }, } From 1fae63c3635d9398bbe32acc0fad41042b9fe91e Mon Sep 17 00:00:00 2001 From: Scott Opell Date: Wed, 4 Sep 2024 17:09:27 -0400 Subject: [PATCH 014/128] Increases the idle rss limit to account for the default checks (#29063) --- test/regression/cases/idle/experiment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/regression/cases/idle/experiment.yaml b/test/regression/cases/idle/experiment.yaml index 8cbad50293983..07fdaa0c45fc3 100644 --- a/test/regression/cases/idle/experiment.yaml +++ b/test/regression/cases/idle/experiment.yaml @@ -32,4 +32,4 @@ checks: description: "Memory usage quality gate. This puts a bound on the total agent memory usage." 
bounds: series: total_rss_bytes - upper_bound: "424.0 MiB" + upper_bound: "465.0 MiB" From b938bcfa3bcd23b4bac6a598e2c486a5414566d8 Mon Sep 17 00:00:00 2001 From: Ethan Wood-Thomas Date: Wed, 4 Sep 2024 17:12:46 -0400 Subject: [PATCH 015/128] [CONTINT-4408] add tags to events for all kubernetes resource types (#28868) --- .../kubernetesapiserver/events_common.go | 33 +++++++++- .../kubernetesapiserver/events_common_test.go | 64 +++++++++++++++++++ .../kubernetesapiserver/unbundled_events.go | 21 +----- .../unbundled_events_test.go | 15 +---- 4 files changed, 99 insertions(+), 34 deletions(-) diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go index 2509961cdcd01..7054a232e7282 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go @@ -260,13 +260,39 @@ func getInvolvedObjectTags(involvedObject v1.ObjectReference, taggerInstance tag fmt.Sprintf("namespace:%s", involvedObject.Namespace), ) - namespaceEntityID := fmt.Sprintf("kubernetes_metadata://%s", string(util.GenerateKubeMetadataEntityID("", "namespaces", "", involvedObject.Namespace))) + namespaceEntityID := types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", involvedObject.Namespace))).String() namespaceEntity, err := taggerInstance.GetEntity(namespaceEntityID) if err == nil { tagList = append(tagList, namespaceEntity.GetTags(types.HighCardinality)...) } } + var entityID string + + switch involvedObject.Kind { + case podKind: + entityID = types.NewEntityID(types.KubernetesPodUID, string(involvedObject.UID)).String() + case deploymentKind: + entityID = types.NewEntityID(types.KubernetesDeployment, fmt.Sprintf("%s/%s", involvedObject.Namespace, involvedObject.Name)).String() + default: + var apiGroup string + apiVersionParts := strings.Split(involvedObject.APIVersion, "/") + if len(apiVersionParts) == 2 { + apiGroup = apiVersionParts[0] + } else { + apiGroup = "" + } + resourceType := strings.ToLower(involvedObject.Kind) + "s" + entityID = types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID(apiGroup, resourceType, involvedObject.Namespace, involvedObject.Name))).String() + } + + entity, err := taggerInstance.GetEntity(entityID) + if err == nil { + tagList = append(tagList, entity.GetTags(types.HighCardinality)...) 
+ } else { + log.Debugf("error getting entity for entity ID '%s': tags may be missing", entityID) + } + kindTag := getKindTag(involvedObject.Kind, involvedObject.Name) if kindTag != "" { tagList = append(tagList, kindTag) @@ -276,8 +302,9 @@ func getInvolvedObjectTags(involvedObject v1.ObjectReference, taggerInstance tag } const ( - podKind = "Pod" - nodeKind = "Node" + podKind = "Pod" + nodeKind = "Node" + deploymentKind = "Deployment" ) func getEventHostInfo(clusterName string, ev *v1.Event) eventHostInfo { diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go index 693751cb20d14..34aff34230dda 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go @@ -59,7 +59,11 @@ func Test_getInvolvedObjectTags(t *testing.T) { telemetryStore := telemetry.NewStore(telemetryComponent) cfg := configmock.New(t) taggerInstance := local.NewFakeTagger(cfg, telemetryStore) + taggerInstance.SetTags("kubernetes_pod_uid://nginx", "workloadmeta-kubernetes_pod", nil, []string{"additional_pod_tag:nginx"}, nil, nil) + taggerInstance.SetTags("deployment://workload-redis/my-deployment-1", "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-1"}, nil, nil) + taggerInstance.SetTags("deployment://default/my-deployment-2", "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-2"}, nil, nil) taggerInstance.SetTags("kubernetes_metadata:///namespaces//default", "workloadmeta-kubernetes_node", []string{"team:container-int"}, nil, nil, nil) + taggerInstance.SetTags("kubernetes_metadata://api-group/resourcetypes/default/generic-resource", "workloadmeta-kubernetes_resource", []string{"generic_tag:generic-resource"}, nil, nil, nil) tests := []struct { name string involvedObject v1.ObjectReference @@ -68,6 +72,7 @@ func Test_getInvolvedObjectTags(t *testing.T) { { name: "get pod basic tags", involvedObject: v1.ObjectReference{ + UID: "nginx", Kind: "Pod", Name: "my-pod", Namespace: "my-namespace", @@ -80,11 +85,13 @@ func Test_getInvolvedObjectTags(t *testing.T) { "kube_namespace:my-namespace", "namespace:my-namespace", "pod_name:my-pod", + "additional_pod_tag:nginx", }, }, { name: "get pod namespace tags", involvedObject: v1.ObjectReference{ + UID: "nginx", Kind: "Pod", Name: "my-pod", Namespace: "default", @@ -98,6 +105,63 @@ func Test_getInvolvedObjectTags(t *testing.T) { "namespace:default", "team:container-int", // this tag is coming from the namespace "pod_name:my-pod", + "additional_pod_tag:nginx", + }, + }, + { + name: "get deployment basic tags", + involvedObject: v1.ObjectReference{ + Kind: "Deployment", + Name: "my-deployment-1", + Namespace: "workload-redis", + }, + tags: []string{ + "kube_kind:Deployment", + "kube_name:my-deployment-1", + "kubernetes_kind:Deployment", + "name:my-deployment-1", + "kube_namespace:workload-redis", + "namespace:workload-redis", + "kube_deployment:my-deployment-1", + "deployment_tag:redis-1", + }, + }, + { + name: "get deployment namespace tags", + involvedObject: v1.ObjectReference{ + Kind: "Deployment", + Name: "my-deployment-2", + Namespace: "default", + }, + tags: []string{ + "kube_kind:Deployment", + "kube_name:my-deployment-2", + "kubernetes_kind:Deployment", + "name:my-deployment-2", + "kube_namespace:default", + "namespace:default", + "kube_deployment:my-deployment-2", + "team:container-int", // this tag is coming from 
the namespace + "deployment_tag:redis-2", + }, + }, + { + name: "get tags for any metadata resource", + involvedObject: v1.ObjectReference{ + Kind: "ResourceType", + Name: "generic-resource", + Namespace: "default", + APIVersion: "api-group/v1", + }, + tags: []string{ + "kube_kind:ResourceType", + "kube_name:generic-resource", + "kubernetes_kind:ResourceType", + "name:generic-resource", + "kube_namespace:default", + "namespace:default", + "team:container-int", // this tag is coming from the namespace + "generic_tag:generic-resource", }, }, } diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go index d6ae0ca1a4a0c..8cd906d244ff3 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go @@ -151,15 +151,14 @@ func (c *unbundledTransformer) buildEventTags(ev *v1.Event, involvedObject v1.Ob tagsAccumulator.Append(getInvolvedObjectTags(involvedObject, c.taggerInstance)...) // Finally tags from the tagger - c.getTagsFromTagger(involvedObject, tagsAccumulator) + c.getTagsFromTagger(tagsAccumulator) tagsAccumulator.SortUniq() return tagsAccumulator.Get() } -// getTagsFromTagger add to the TagsAccumulator associated object tags from the tagger. -// For now only Pod object kind is supported. -func (c *unbundledTransformer) getTagsFromTagger(obj v1.ObjectReference, tagsAcc tagset.TagsAccumulator) { +// getTagsFromTagger add to the TagsAccumulator global tags from the tagger +func (c *unbundledTransformer) getTagsFromTagger(tagsAcc tagset.TagsAccumulator) { if c.taggerInstance == nil { return } @@ -169,20 +168,6 @@ func (c *unbundledTransformer) getTagsFromTagger(obj v1.ObjectReference, tagsAcc log.Debugf("error getting global tags: %s", err) } tagsAcc.Append(globalTags...) - - switch obj.Kind { - case podKind: - entityID := fmt.Sprintf("kubernetes_pod_uid://%s", obj.UID) - entity, err := c.taggerInstance.GetEntity(entityID) - if err == nil { - // we can get high Cardinality because tags on events is seemless. - tagsAcc.Append(entity.GetTags(types.HighCardinality)...) 
- } else { - log.Debugf("error getting pod entity for entity ID: %s, pod tags may be missing", err) - } - - default: - } } func (c *unbundledTransformer) shouldCollect(ev *v1.Event) bool { diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go index 0c1e03893b675..768f0812e3460 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go @@ -901,7 +901,6 @@ func TestUnbundledEventsTransformFiltering(t *testing.T) { func TestGetTagsFromTagger(t *testing.T) { taggerInstance := taggerimpl.SetupFakeTagger(t) - taggerInstance.SetTags("kubernetes_pod_uid://nginx", "workloadmeta-kubernetes_pod", nil, []string{"pod_name:nginx"}, nil, nil) taggerInstance.SetGlobalTags([]string{"global:here"}, nil, nil, nil) tests := []struct { @@ -910,7 +909,7 @@ func TestGetTagsFromTagger(t *testing.T) { expectedTags *tagset.HashlessTagsAccumulator }{ { - name: "accumulates basic pod tags", + name: "accumulates global tags", obj: v1.ObjectReference{ UID: "redis", Kind: "Pod", @@ -919,16 +918,6 @@ func TestGetTagsFromTagger(t *testing.T) { }, expectedTags: tagset.NewHashlessTagsAccumulatorFromSlice([]string{"global:here"}), }, - { - name: "add tagger pod tags", - obj: v1.ObjectReference{ - UID: "nginx", - Kind: "Pod", - Namespace: "default", - Name: "nginx", - }, - expectedTags: tagset.NewHashlessTagsAccumulatorFromSlice([]string{"global:here", "pod_name:nginx"}), - }, } for _, tt := range tests { @@ -938,7 +927,7 @@ func TestGetTagsFromTagger(t *testing.T) { } transformer := newUnbundledTransformer("test-cluster", taggerInstance, collectedTypes, false, false) accumulator := tagset.NewHashlessTagsAccumulator() - transformer.(*unbundledTransformer).getTagsFromTagger(tt.obj, accumulator) + transformer.(*unbundledTransformer).getTagsFromTagger(accumulator) assert.Equal(t, tt.expectedTags, accumulator) }) } From 017da1d0a50a274461e14c62cbed510ad9555b28 Mon Sep 17 00:00:00 2001 From: Derek Brown Date: Wed, 4 Sep 2024 15:03:02 -0700 Subject: [PATCH 016/128] [windows] Update release.json to include fixes to driver installation merge modules (#29032) Co-authored-by: DeForest Richards <56796055+drichards-87@users.noreply.github.com> --- .gitlab/e2e_install_packages/windows.yml | 2 ++ release.json | 24 +++++++++---------- ...stallationhandleleak-346543b8c5f21303.yaml | 11 +++++++++ 3 files changed, 25 insertions(+), 12 deletions(-) create mode 100644 releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml diff --git a/.gitlab/e2e_install_packages/windows.yml b/.gitlab/e2e_install_packages/windows.yml index ccf453543998c..70603614abca2 100644 --- a/.gitlab/e2e_install_packages/windows.yml +++ b/.gitlab/e2e_install_packages/windows.yml @@ -39,6 +39,7 @@ - E2E_MSI_TEST: TestInstall - E2E_MSI_TEST: TestRepair - E2E_MSI_TEST: TestUpgrade + - E2E_MSI_TEST: TestUpgradeRollback - E2E_MSI_TEST: TestUpgradeRollbackWithoutCWS - E2E_MSI_TEST: TestUpgradeChangeUser - E2E_MSI_TEST: TestUpgradeFromV5 @@ -61,6 +62,7 @@ - E2E_MSI_TEST: TestInstall - E2E_MSI_TEST: TestRepair - E2E_MSI_TEST: TestUpgrade + - E2E_MSI_TEST: TestUpgradeRollback - E2E_MSI_TEST: TestUpgradeRollbackWithoutCWS - E2E_MSI_TEST: TestUpgradeChangeUser - E2E_MSI_TEST: TestUpgradeFromV5 diff --git a/release.json b/release.json index c6e75e4722f0d..6d80d1d5fccc1 100644 --- a/release.json +++ b/release.json @@ -13,16 +13,16 @@ 
"JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "SECURITY_AGENT_POLICIES_VERSION": "master", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab", + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490", "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", - "WINDOWS_APMINJECT_VERSION": "1.1.2", - "WINDOWS_APMINJECT_SHASUM": "27d85ab3a26c123b2655a838b0bec099268de2f2b86d2b8a74232e65f4f8f05f" + "WINDOWS_APMINJECT_VERSION": "1.1.3", + "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd" }, "nightly-a7": { "INTEGRATIONS_CORE_VERSION": "master", @@ -32,16 +32,16 @@ "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "SECURITY_AGENT_POLICIES_VERSION": "master", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab", + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490", "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", - "WINDOWS_APMINJECT_VERSION": "1.1.2", - "WINDOWS_APMINJECT_SHASUM": "27d85ab3a26c123b2655a838b0bec099268de2f2b86d2b8a74232e65f4f8f05f" + "WINDOWS_APMINJECT_VERSION": "1.1.3", + "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd" }, "release-a6": { "INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2", diff --git a/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml b/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml new file mode 100644 index 0000000000000..cd824cd795d5d --- /dev/null +++ b/releasenotes/notes/fixdriverinstallationhandleleak-346543b8c5f21303.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled. 
From b4d420bcca27c02c9c0a4ae675eae43c80f2bf9c Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 5 Sep 2024 09:40:00 +0200 Subject: [PATCH 017/128] [CWS] secagent tests: simplify into one `fxutil.Test` invocation (#29061) --- pkg/security/tests/module_tester.go | 10 ++++++++++ pkg/security/tests/module_tester_linux.go | 9 ++++----- pkg/security/tests/module_tester_windows.go | 8 +++----- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index bee5088cccb82..e33fcd226b6e4 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -27,6 +27,9 @@ import ( "unsafe" spconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" + "github.com/DataDog/datadog-agent/comp/core/telemetry" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "go.uber.org/fx" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -878,3 +881,10 @@ func jsonPathValidation(testMod *testModule, data []byte, fnc func(testMod *test fnc(testMod, obj) } + +type testModuleFxDeps struct { + fx.In + + Telemetry telemetry.Component + WMeta workloadmeta.Component +} diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index 13b8b1b608f59..ac8e76454c846 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -35,8 +35,6 @@ import ( "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/comp/core" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" @@ -795,12 +793,13 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman } else { emopts.ProbeOpts.TagsResolver = NewFakeResolverDifferentImageNames() } - telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := fxutil.Test[workloadmeta.Component](t, + + fxDeps := fxutil.Test[testModuleFxDeps]( + t, core.MockBundle(), wmmock.MockModule(workloadmeta.NewParams()), ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, wmeta, telemetry) + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) if err != nil { return nil, err } diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index e494bf347a558..d7cca1b12d8ab 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -19,8 +19,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/DataDog/datadog-agent/comp/core" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" @@ -265,12 +263,12 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] StatsdClient: statsdClient, }, } - telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := 
fxutil.Test[workloadmeta.Component](t, + fxDeps := fxutil.Test[testModuleFxDeps]( + t, core.MockBundle(), wmmock.MockModule(workloadmeta.NewParams()), ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, wmeta, telemetry) + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) if err != nil { return nil, err } From 3901b23906d804a16bcf70fca3c8b55622f9172c Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 5 Sep 2024 09:45:34 +0200 Subject: [PATCH 018/128] [CWS] remove "ignore DD agent containers" telemetry option (#29065) --- cmd/security-agent/subcommands/runtime/command.go | 3 +-- pkg/config/setup/config.go | 1 - pkg/security/agent/agent.go | 3 +-- pkg/security/agent/agent_nix.go | 2 +- pkg/security/agent/telemetry_linux.go | 3 +-- pkg/security/telemetry/telemetry.go | 14 ++++++-------- 6 files changed, 10 insertions(+), 16 deletions(-) diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index 667f9778f2467..7725d89c81e44 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -684,8 +684,7 @@ func StartRuntimeSecurity(log log.Component, config config.Component, hostname s // start/stop order is important, agent need to be stopped first and started after all the others // components agent, err := secagent.NewRuntimeSecurityAgent(statsdClient, hostname, secagent.RSAOptions{ - LogProfiledWorkloads: config.GetBool("runtime_security_config.log_profiled_workloads"), - IgnoreDDAgentContainers: config.GetBool("runtime_security_config.telemetry.ignore_dd_agent_containers"), + LogProfiledWorkloads: config.GetBool("runtime_security_config.log_profiled_workloads"), }, wmeta) if err != nil { return nil, fmt.Errorf("unable to create a runtime security agent instance: %w", err) diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 44c3efd418475..85df73f4c7eed 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -910,7 +910,6 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("runtime_security_config.socket", filepath.Join(InstallPath, "run/runtime-security.sock")) } config.BindEnvAndSetDefault("runtime_security_config.log_profiled_workloads", false) - config.BindEnvAndSetDefault("runtime_security_config.telemetry.ignore_dd_agent_containers", true) config.BindEnvAndSetDefault("runtime_security_config.use_secruntime_track", true) bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.endpoints.") bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.activity_dump.remote_storage.endpoints.") diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index 7251fe7dff99e..3d2fd1832d0b3 100644 --- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -48,8 +48,7 @@ type RuntimeSecurityAgent struct { // RSAOptions represents the runtime security agent options type RSAOptions struct { - LogProfiledWorkloads bool - IgnoreDDAgentContainers bool + LogProfiledWorkloads bool } // Start the runtime security agent diff --git a/pkg/security/agent/agent_nix.go b/pkg/security/agent/agent_nix.go index f7c537a7d7525..5a051c9a125a1 100644 --- a/pkg/security/agent/agent_nix.go +++ b/pkg/security/agent/agent_nix.go @@ -25,7 +25,7 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin } // on windows do no telemetry - telemetry, err := 
newTelemetry(statsdClient, wmeta, opts.IgnoreDDAgentContainers) + telemetry, err := newTelemetry(statsdClient, wmeta) if err != nil { return nil, errors.New("failed to initialize the telemetry reporter") } diff --git a/pkg/security/agent/telemetry_linux.go b/pkg/security/agent/telemetry_linux.go index abd606a35b400..71c5c41b864bd 100644 --- a/pkg/security/agent/telemetry_linux.go +++ b/pkg/security/agent/telemetry_linux.go @@ -26,7 +26,7 @@ type telemetry struct { runtimeSecurityClient *RuntimeSecurityClient } -func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component, ignoreDDAgentContainers bool) (*telemetry, error) { +func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*telemetry, error) { runtimeSecurityClient, err := NewRuntimeSecurityClient() if err != nil { return nil, err @@ -37,7 +37,6 @@ func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Compon if err != nil { return nil, err } - containersTelemetry.IgnoreDDAgent = ignoreDDAgentContainers return &telemetry{ containers: containersTelemetry, diff --git a/pkg/security/telemetry/telemetry.go b/pkg/security/telemetry/telemetry.go index 70ba2ca19a1c9..71985fb7b4b97 100644 --- a/pkg/security/telemetry/telemetry.go +++ b/pkg/security/telemetry/telemetry.go @@ -18,7 +18,6 @@ import ( type ContainersTelemetry struct { TelemetrySender SimpleTelemetrySender MetadataStore workloadmeta.Component - IgnoreDDAgent bool } // NewContainersTelemetry returns a new ContainersTelemetry based on default/global objects @@ -40,13 +39,12 @@ func (c *ContainersTelemetry) ReportContainers(metricName string) { containers := c.ListRunningContainers() for _, container := range containers { - if c.IgnoreDDAgent { - value := container.EnvVars["DOCKER_DD_AGENT"] - value = strings.ToLower(value) - if value == "yes" || value == "true" { - log.Debugf("ignoring container: name=%s id=%s image_id=%s", container.Name, container.ID, container.Image.ID) - continue - } + // ignore DD agent containers + value := container.EnvVars["DOCKER_DD_AGENT"] + value = strings.ToLower(value) + if value == "yes" || value == "true" { + log.Debugf("ignoring container: name=%s id=%s image_id=%s", container.Name, container.ID, container.Image.ID) + continue } c.TelemetrySender.Gauge(metricName, 1.0, []string{"container_id:" + container.ID}) From 93cb175e1f94540484c8d6fffdb89783af032e98 Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Thu, 5 Sep 2024 10:04:59 +0200 Subject: [PATCH 019/128] [CWS-2960] Allow disarming kill actions on inaccurate rules (#28970) --- LICENSE-3rdparty.csv | 1 + go.mod | 1 + go.sum | 2 + pkg/config/setup/system_probe_cws.go | 6 + pkg/security/config/config.go | 40 +++- pkg/security/probe/probe_ebpf.go | 4 + pkg/security/probe/probe_ebpfless.go | 3 + pkg/security/probe/probe_windows.go | 4 + pkg/security/probe/process_killer.go | 166 ++++++++++++++++ pkg/security/tests/action_test.go | 209 ++++++++++++++++++++ pkg/security/tests/main_linux.go | 1 + pkg/security/tests/module_tester.go | 9 +- pkg/security/tests/module_tester_linux.go | 23 ++- pkg/security/tests/module_tester_windows.go | 16 ++ pkg/security/tests/testopts.go | 16 +- 15 files changed, 489 insertions(+), 12 deletions(-) diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 8701e0696d2b5..2ebca5bc6d7b6 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1368,6 +1368,7 @@ core,github.com/jaegertracing/jaeger/thrift-gen/jaeger,Apache-2.0,Copyright 2015 
core,github.com/jaegertracing/jaeger/thrift-gen/sampling,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/thrift-gen/zipkincore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jbenet/go-context/io,MIT,Copyright (c) 2014 Juan Batiz-Benet +core,github.com/jellydator/ttlcache/v3,MIT,Copyright (c) 2022 Jellydator core,github.com/jinzhu/inflection,MIT,Copyright (c) 2015 - Jinzhu core,github.com/jlaffaye/ftp,ISC,"Copyright (c) 2011-2013, Julien Laffaye " core,github.com/jmespath/go-jmespath,Apache-2.0,Copyright 2015 James Saryerwinnie diff --git a/go.mod b/go.mod index 4d5c0ef3cdb58..77c67a228ff17 100644 --- a/go.mod +++ b/go.mod @@ -602,6 +602,7 @@ require ( github.com/containerd/containerd/api v1.7.19 github.com/containerd/errdefs v0.1.0 github.com/distribution/reference v0.6.0 + github.com/jellydator/ttlcache/v3 v3.0.1 github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649 github.com/lorenzosaino/go-sysctl v0.3.1 ) diff --git a/go.sum b/go.sum index f239ac2747fbf..20463da9250a2 100644 --- a/go.sum +++ b/go.sum @@ -1741,6 +1741,8 @@ github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInw github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU= +github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index ffa2f283ae56d..3497ffc143007 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -133,4 +133,10 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.raw_syscall.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.exclude_binaries", []string{}) cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.rule_source_allowed", []string{"file", "remote-config"}) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.enabled", true) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.max_allowed", 5) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.container.period", "1m") + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.enabled", true) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.max_allowed", 5) + cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.disarmer.executable.period", "1m") } diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index 630c3a952ce10..f854a755126ec 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -233,10 +233,26 @@ type RuntimeSecurityConfig struct { EBPFLessSocket string // Enforcement capabilities - EnforcementEnabled bool + // EnforcementEnabled defines 
if the enforcement capability should be enabled + EnforcementEnabled bool + // EnforcementRawSyscallEnabled defines if the enforcement should be performed using the sys_enter tracepoint EnforcementRawSyscallEnabled bool EnforcementBinaryExcluded []string EnforcementRuleSourceAllowed []string + // EnforcementDisarmerContainerEnabled defines if an enforcement rule should be disarmed when hitting too many different containers + EnforcementDisarmerContainerEnabled bool + // EnforcementDisarmerContainerMaxAllowed defines the maximum number of different containers that can trigger an enforcement rule + // within a period before the enforcement is disarmed for this rule + EnforcementDisarmerContainerMaxAllowed int + // EnforcementDisarmerContainerPeriod defines the period during which EnforcementDisarmerContainerMaxAllowed is checked + EnforcementDisarmerContainerPeriod time.Duration + // EnforcementDisarmerExecutableEnabled defines if an enforcement rule should be disarmed when hitting too many different executables + EnforcementDisarmerExecutableEnabled bool + // EnforcementDisarmerExecutableMaxAllowed defines the maximum number of different executables that can trigger an enforcement rule + // within a period before the enforcement is disarmed for this rule + EnforcementDisarmerExecutableMaxAllowed int + // EnforcementDisarmerExecutablePeriod defines the period during which EnforcementDisarmerExecutableMaxAllowed is checked + EnforcementDisarmerExecutablePeriod time.Duration //WindowsFilenameCacheSize is the max number of filenames to cache WindowsFilenameCacheSize int @@ -416,10 +432,16 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { AnomalyDetectionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"), // enforcement - EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), - EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), - EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), - EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), + EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), + EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), + EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), + EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), + EnforcementDisarmerContainerEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"), + EnforcementDisarmerContainerMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"), + EnforcementDisarmerContainerPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"), + EnforcementDisarmerExecutableEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"), + EnforcementDisarmerExecutableMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"), + EnforcementDisarmerExecutablePeriod: 
coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"), // User Sessions UserSessionsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), @@ -489,6 +511,14 @@ func (c *RuntimeSecurityConfig) sanitize() error { return fmt.Errorf("invalid IPv4 address: got %v", coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4")) } + if c.EnforcementDisarmerContainerEnabled && c.EnforcementDisarmerContainerMaxAllowed <= 0 { + return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.container.max_allowed: %d", c.EnforcementDisarmerContainerMaxAllowed) + } + + if c.EnforcementDisarmerExecutableEnabled && c.EnforcementDisarmerExecutableMaxAllowed <= 0 { + return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.executable.max_allowed: %d", c.EnforcementDisarmerExecutableMaxAllowed) + } + return c.sanitizeRuntimeSecurityConfigActivityDump() } diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index f5737aa88bdbe..86be48d856c76 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -331,6 +331,8 @@ func (p *EBPFProbe) Init() error { return err } + p.processKiller.Start(p.ctx, &p.wg) + return nil } @@ -1572,6 +1574,8 @@ func (p *EBPFProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRepor // activity dump & security profiles needRawSyscalls := p.isNeededForActivityDump(model.SyscallsEventType.String()) + p.processKiller.Reset() + // kill action if p.config.RuntimeSecurity.EnforcementEnabled && isKillActionPresent(rs) { if !p.config.RuntimeSecurity.EnforcementRawSyscallEnabled { diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 36e24729bddf7..492abb05f42b6 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -351,6 +351,8 @@ func (p *EBPFLessProbe) DispatchEvent(event *model.Event) { // Init the probe func (p *EBPFLessProbe) Init() error { + p.processKiller.Start(p.ctx, &p.wg) + if err := p.Resolvers.Start(p.ctx); err != nil { return err } @@ -571,6 +573,7 @@ func (p *EBPFLessProbe) FlushDiscarders() error { // ApplyRuleSet applies the new ruleset func (p *EBPFLessProbe) ApplyRuleSet(_ *rules.RuleSet) (*kfilters.ApplyRuleSetReport, error) { + p.processKiller.Reset() return &kfilters.ApplyRuleSetReport{}, nil } diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index e440f8f5bf66e..0bff0416519fe 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -196,6 +196,8 @@ type etwCallback func(n interface{}, pid uint32) // Init initializes the probe func (p *WindowsProbe) Init() error { + p.processKiller.Start(p.ctx, &p.wg) + if !p.opts.disableProcmon { pm, err := procmon.NewWinProcMon(p.onStart, p.onStop, p.onError, procmon.ProcmonDefaultReceiveSize, procmon.ProcmonDefaultNumBufs) if err != nil { @@ -1255,6 +1257,8 @@ func (p *WindowsProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRe } } + p.processKiller.Reset() + ars, err := kfilters.NewApplyRuleSetReport(p.config.Probe, rs) if err != nil { return nil, err diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 2bb2559e50a10..9af3dd54c1975 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -9,10 +9,13 @@ package probe import ( + "context" "slices" "sync" "time" + 
"github.com/jellydator/ttlcache/v3" + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -24,20 +27,28 @@ import ( const ( defaultKillActionFlushDelay = 2 * time.Second + dismarmerCacheFlushInterval = 5 * time.Second ) // ProcessKiller defines a process killer structure type ProcessKiller struct { sync.Mutex + cfg *config.Config + pendingReports []*KillActionReport binariesExcluded []*eval.Glob sourceAllowed []string + + ruleDisarmersLock sync.Mutex + ruleDisarmers map[rules.RuleID]*killDisarmer } // NewProcessKiller returns a new ProcessKiller func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { p := &ProcessKiller{ + cfg: cfg, + ruleDisarmers: make(map[rules.RuleID]*killDisarmer), sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, } @@ -129,6 +140,39 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R return } + rsConfig := p.cfg.RuntimeSecurity + + if rsConfig.EnforcementDisarmerContainerEnabled || rsConfig.EnforcementDisarmerExecutableEnabled { + var dismarmer *killDisarmer + p.ruleDisarmersLock.Lock() + if dismarmer = p.ruleDisarmers[rule.ID]; dismarmer == nil { + dismarmer = newKillDisarmer(rsConfig, rule.ID) + p.ruleDisarmers[rule.ID] = dismarmer + } + p.ruleDisarmersLock.Unlock() + + if rsConfig.EnforcementDisarmerContainerEnabled { + if containerID := ev.FieldHandlers.ResolveContainerID(ev, ev.ContainerContext); containerID != "" { + if !dismarmer.allow(dismarmer.containerCache, containerID, func() { + seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, dismarmer.containerCache.capacity, rsConfig.EnforcementDisarmerContainerPeriod) + }) { + seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) + return + } + } + } + + if rsConfig.EnforcementDisarmerExecutableEnabled { + executable := entry.Process.FileEvent.PathnameStr + if !dismarmer.allow(dismarmer.executableCache, executable, func() { + seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, dismarmer.executableCache.capacity, rsConfig.EnforcementDisarmerExecutablePeriod) + }) { + seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) + return + } + } + } + switch scope { case "container", "process": default: @@ -173,3 +217,125 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R ev.ActionReports = append(ev.ActionReports, report) p.pendingReports = append(p.pendingReports, report) } + +// Reset resets the disarmer state +func (p *ProcessKiller) Reset() { + p.ruleDisarmersLock.Lock() + clear(p.ruleDisarmers) + p.ruleDisarmersLock.Unlock() +} + +// Start starts the go rountine responsible for flushing the disarmer caches +func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { + if !p.cfg.RuntimeSecurity.EnforcementEnabled || (!p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled && !p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled) { + return + } + + wg.Add(1) + go func() { + defer wg.Done() + ticker := time.NewTicker(dismarmerCacheFlushInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + p.ruleDisarmersLock.Lock() + for _, disarmer := range p.ruleDisarmers { + disarmer.Lock() + var cLength, eLength int + if disarmer.containerCache != nil { + 
cLength = disarmer.containerCache.flush() + } + if disarmer.executableCache != nil { + eLength = disarmer.executableCache.flush() + } + if disarmer.disarmed && cLength == 0 && eLength == 0 { + disarmer.disarmed = false + seclog.Infof("kill action of rule `%s` has been re-armed", disarmer.ruleID) + } + disarmer.Unlock() + } + p.ruleDisarmersLock.Unlock() + case <-ctx.Done(): + return + } + } + }() +} + +type killDisarmer struct { + sync.Mutex + disarmed bool + ruleID rules.RuleID + containerCache *disarmerCache[string, bool] + executableCache *disarmerCache[string, bool] +} + +type disarmerCache[K comparable, V any] struct { + *ttlcache.Cache[K, V] + capacity uint64 +} + +func newDisarmerCache[K comparable, V any](capacity uint64, period time.Duration) *disarmerCache[K, V] { + cacheOpts := []ttlcache.Option[K, V]{ + ttlcache.WithCapacity[K, V](capacity), + } + + if period > 0 { + cacheOpts = append(cacheOpts, ttlcache.WithTTL[K, V](period)) + } + + return &disarmerCache[K, V]{ + Cache: ttlcache.New[K, V](cacheOpts...), + capacity: capacity, + } +} + +func (c *disarmerCache[K, V]) flush() int { + c.DeleteExpired() + return c.Len() +} + +func newKillDisarmer(cfg *config.RuntimeSecurityConfig, ruleID rules.RuleID) *killDisarmer { + kd := &killDisarmer{ + disarmed: false, + ruleID: ruleID, + } + + if cfg.EnforcementDisarmerContainerEnabled { + kd.containerCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerContainerMaxAllowed), cfg.EnforcementDisarmerContainerPeriod) + } + + if cfg.EnforcementDisarmerExecutableEnabled { + kd.executableCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerExecutableMaxAllowed), cfg.EnforcementDisarmerExecutablePeriod) + } + + return kd +} + +func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], key string, onDisarm func()) bool { + kd.Lock() + defer kd.Unlock() + + if kd.disarmed { + return false + } + + if cache == nil { + return true + } + + cache.DeleteExpired() + // if the key is not in the cache, check if the new key causes the number of keys to exceed the capacity + // otherwise, the key is already in the cache and cache.Get will update its TTL + if cache.Get(key) == nil { + alreadyAtCapacity := uint64(cache.Len()) >= cache.capacity + cache.Set(key, true, ttlcache.DefaultTTL) + if alreadyAtCapacity && !kd.disarmed { + kd.disarmed = true + onDisarm() + } + } + + return !kd.disarmed +} diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go index 8a9f9b6084ac1..5214d6961619c 100644 --- a/pkg/security/tests/action_test.go +++ b/pkg/security/tests/action_test.go @@ -354,3 +354,212 @@ func TestActionKillRuleSpecific(t *testing.T) { }, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) assert.NoError(t, err) } + +func TestActionKillDisarm(t *testing.T) { + SkipIfNotAvailable(t) + + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool { + return !kv.SupportBPFSendSignal() && env.IsContainerized() + }) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "kill_action_disarm_executable", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id == ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, 
+ }, + }, + }, + { + ID: "kill_action_disarm_container", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id != ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + }, + }, + } + + sleep := which(t, "sleep") + const ( + enforcementDisarmerContainerPeriod = 10 * time.Second + enforcementDisarmerExecutablePeriod = 10 * time.Second + ) + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{ + enforcementDisarmerContainerEnabled: true, + enforcementDisarmerContainerMaxAllowed: 1, + enforcementDisarmerContainerPeriod: enforcementDisarmerContainerPeriod, + enforcementDisarmerExecutableEnabled: true, + enforcementDisarmerExecutableMaxAllowed: 1, + enforcementDisarmerExecutablePeriod: enforcementDisarmerExecutablePeriod, + eventServerRetention: 1 * time.Nanosecond, + })) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + testKillActionSuccess := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) { + test.msgSender.flush() + err := test.GetEventSent(t, func() error { + ch := make(chan bool, 1) + + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cmdFunc(timeoutCtx) + + ch <- true + }() + + select { + case <-ch: + case <-time.After(time.Second * 3): + t.Error("signal timeout") + } + return nil + }, func(_ *rules.Rule, _ *model.Event) bool { + return true + }, time.Second*5, ruleID) + if err != nil { + t.Error(err) + } + + err = retry.Do(func() error { + msg := test.msgSender.getMsg(ruleID) + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { + if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { + t.Error(err) + } + if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { + t.Error(err) + } + }) + + return nil + }, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + } + + testKillActionIgnored := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) { + test.msgSender.flush() + err := test.GetEventSent(t, func() error { + cmdFunc(nil) + return nil + }, func(_ *rules.Rule, _ *model.Event) bool { + return true + }, time.Second*5, ruleID) + if err != nil { + t.Error(err) + } + + err = retry.Do(func() error { + msg := test.msgSender.getMsg(ruleID) + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { + if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil { + t.Error(errors.New("unexpected rule action")) + } + }) + + return nil + }, retry.Delay(200*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + } + + t.Run("executable", func(t *testing.T) { + // test that we can kill processes with the same executable more than once + for i := 0; i < 2; i++ { + t.Logf("test iteration %d", i) + testKillActionSuccess(t, "kill_action_disarm_executable", func(ctx context.Context) { + cmd := exec.CommandContext(ctx, syscallTester, "sleep", "5") + cmd.Env = []string{"TARGETTOKILL=1"} + _ = cmd.Run() + }) + } + + // test that another executable dismars the 
kill action + testKillActionIgnored(t, "kill_action_disarm_executable", func(_ context.Context) { + cmd := exec.Command(sleep, "1") + cmd.Env = []string{"TARGETTOKILL=1"} + _ = cmd.Run() + }) + + // test that the kill action is re-armed after both executable cache entries have expired + // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed + time.Sleep(enforcementDisarmerExecutablePeriod + 5*time.Second + 1*time.Second) + testKillActionSuccess(t, "kill_action_disarm_executable", func(_ context.Context) { + cmd := exec.Command(sleep, "1") + cmd.Env = []string{"TARGETTOKILL=1"} + _ = cmd.Run() + }) + }) + + t.Run("container", func(t *testing.T) { + dockerInstance, err := test.StartADocker() + if err != nil { + t.Fatalf("failed to start a Docker instance: %v", err) + } + defer dockerInstance.stop() + + // test that we can kill processes within the same container more than once + for i := 0; i < 2; i++ { + t.Logf("test iteration %d", i) + testKillActionSuccess(t, "kill_action_disarm_container", func(_ context.Context) { + cmd := dockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "5"}, []string{}) + _ = cmd.Run() + }) + } + + newDockerInstance, err := test.StartADocker() + if err != nil { + t.Fatalf("failed to start a second Docker instance: %v", err) + } + defer newDockerInstance.stop() + + // test that another container dismars the kill action + testKillActionIgnored(t, "kill_action_disarm_container", func(_ context.Context) { + cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "1"}, []string{}) + _ = cmd.Run() + }) + + // test that the kill action is re-armed after both container cache entries have expired + // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed + time.Sleep(enforcementDisarmerContainerPeriod + 5*time.Second + 1*time.Second) + testKillActionSuccess(t, "kill_action_disarm_container", func(_ context.Context) { + cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "5"}, []string{}) + _ = cmd.Run() + }) + }) +} diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index f503c3090829e..7de491a609fb9 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -101,6 +101,7 @@ func SkipIfNotAvailable(t *testing.T) { "TestLoginUID/login-uid-open-test", "TestLoginUID/login-uid-exec-test", "TestActionKillExcludeBinary", + "TestActionKillDisarm", } if disableSeccomp { diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index e33fcd226b6e4..dcd853df9e258 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -778,6 +778,13 @@ func genTestConfigs(cfgDir string, opts testOpts) (*emconfig.Config, *secconfig. 
"NetworkIngressEnabled": opts.networkIngressEnabled, "OnDemandRateLimiterEnabled": !opts.disableOnDemandRateLimiter, "EnforcementExcludeBinary": opts.enforcementExcludeBinary, + "EnforcementDisarmerContainerEnabled": opts.enforcementDisarmerContainerEnabled, + "EnforcementDisarmerContainerMaxAllowed": opts.enforcementDisarmerContainerMaxAllowed, + "EnforcementDisarmerContainerPeriod": opts.enforcementDisarmerContainerPeriod, + "EnforcementDisarmerExecutableEnabled": opts.enforcementDisarmerExecutableEnabled, + "EnforcementDisarmerExecutableMaxAllowed": opts.enforcementDisarmerExecutableMaxAllowed, + "EnforcementDisarmerExecutablePeriod": opts.enforcementDisarmerExecutablePeriod, + "EventServerRetention": opts.eventServerRetention, }); err != nil { return nil, nil, err } @@ -835,7 +842,7 @@ type fakeMsgSender struct { msgs map[eval.RuleID]*api.SecurityEventMessage } -func (fs *fakeMsgSender) Send(msg *api.SecurityEventMessage, expireFnc func(*api.SecurityEventMessage)) { +func (fs *fakeMsgSender) Send(msg *api.SecurityEventMessage, _ func(*api.SecurityEventMessage)) { fs.Lock() defer fs.Unlock() diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index ac8e76454c846..07a3376d9398f 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -105,6 +105,10 @@ runtime_security_config: enabled: {{ .RuntimeSecurityEnabled }} internal_monitoring: enabled: true +{{ if gt .EventServerRetention 0 }} + event_server: + retention: {{ .EventServerRetention }} +{{ end }} remote_configuration: enabled: false on_demand: @@ -116,11 +120,6 @@ runtime_security_config: enabled: {{ .SBOMEnabled }} host: enabled: {{ .HostSBOMEnabled }} - enforcement: - exclude_binaries: - - {{ .EnforcementExcludeBinary }} - rule_source_allowed: - - file activity_dump: enabled: {{ .EnableActivityDump }} syscall_monitor: @@ -193,6 +192,20 @@ runtime_security_config: enabled: {{.EBPFLessEnabled}} hash_resolver: enabled: true + enforcement: + exclude_binaries: + - {{ .EnforcementExcludeBinary }} + rule_source_allowed: + - file + disarmer: + container: + enabled: {{.EnforcementDisarmerContainerEnabled}} + max_allowed: {{.EnforcementDisarmerContainerMaxAllowed}} + period: {{.EnforcementDisarmerContainerPeriod}} + executable: + enabled: {{.EnforcementDisarmerExecutableEnabled}} + max_allowed: {{.EnforcementDisarmerExecutableMaxAllowed}} + period: {{.EnforcementDisarmerExecutablePeriod}} ` const testPolicy = `--- diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index d7cca1b12d8ab..b2c6d20952a79 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -111,6 +111,10 @@ event_monitoring_config: runtime_security_config: enabled: {{ .RuntimeSecurityEnabled }} +{{ if gt .EventServerRetention 0 }} + event_server: + retention: {{ .EventServerRetention }} +{{ end }} internal_monitoring: enabled: true remote_configuration: @@ -177,6 +181,18 @@ runtime_security_config: {{end}} ebpfless: enabled: {{.EBPFLessEnabled}} + enforcement: + exclude_binaries: + - {{ .EnforcementExcludeBinary }} + disarmer: + container: + enabled: {{.EnforcementDisarmerContainerEnabled}} + max_allowed: {{.EnforcementDisarmerContainerMaxAllowed}} + period: {{.EnforcementDisarmerContainerPeriod}} + executable: + enabled: {{.EnforcementDisarmerExecutableEnabled}} + max_allowed: {{.EnforcementDisarmerExecutableMaxAllowed}} + period: {{.EnforcementDisarmerExecutablePeriod}} ` type 
onRuleHandler func(*model.Event, *rules.Rule) diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index 1ea52a0344b79..43ef3dc1bbebf 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -64,6 +64,13 @@ type testOpts struct { ebpfLessEnabled bool dontWaitEBPFLessClient bool enforcementExcludeBinary string + enforcementDisarmerContainerEnabled bool + enforcementDisarmerContainerMaxAllowed int + enforcementDisarmerContainerPeriod time.Duration + enforcementDisarmerExecutableEnabled bool + enforcementDisarmerExecutableMaxAllowed int + enforcementDisarmerExecutablePeriod time.Duration + eventServerRetention time.Duration } type dynamicTestOpts struct { @@ -139,5 +146,12 @@ func (to testOpts) Equal(opts testOpts) bool { to.networkIngressEnabled == opts.networkIngressEnabled && to.disableOnDemandRateLimiter == opts.disableOnDemandRateLimiter && to.ebpfLessEnabled == opts.ebpfLessEnabled && - to.enforcementExcludeBinary == opts.enforcementExcludeBinary + to.enforcementExcludeBinary == opts.enforcementExcludeBinary && + to.enforcementDisarmerContainerEnabled == opts.enforcementDisarmerContainerEnabled && + to.enforcementDisarmerContainerMaxAllowed == opts.enforcementDisarmerContainerMaxAllowed && + to.enforcementDisarmerContainerPeriod == opts.enforcementDisarmerContainerPeriod && + to.enforcementDisarmerExecutableEnabled == opts.enforcementDisarmerExecutableEnabled && + to.enforcementDisarmerExecutableMaxAllowed == opts.enforcementDisarmerExecutableMaxAllowed && + to.enforcementDisarmerExecutablePeriod == opts.enforcementDisarmerExecutablePeriod && + to.eventServerRetention == opts.eventServerRetention } From 16c2a95a235cb969f39040f5bbd26495ae61a5b8 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Thu, 5 Sep 2024 12:01:32 +0300 Subject: [PATCH 020/128] service discovery: Remove redundant map (#29045) --- .../corechecks/servicediscovery/impl_linux.go | 2 +- .../servicediscovery/servicetype/servicetype.go | 13 ++----------- .../servicetype/servicetype_test.go | 10 ++-------- 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index c24bd5087cd09..eb2f533ab901c 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -137,7 +137,7 @@ func (li *linuxImpl) getServiceInfo(service model.Service) serviceInfo { // for now, docker-proxy is going on the ignore list - serviceType := servicetype.Detect(service.Name, service.Ports) + serviceType := servicetype.Detect(service.Ports) meta := ServiceMetadata{ Name: service.Name, diff --git a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go index 30b2b9f597053..a55781aecc01b 100644 --- a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go +++ b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype.go @@ -1,7 +1,7 @@ // Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. +// Copyright 2024-present Datadog, Inc. // Package servicetype provides functionality to detect the service type for a given process. 
package servicetype @@ -84,15 +84,10 @@ var ( 80: FrontEnd, 443: FrontEnd, } - - // for now, this is unpopulated, but - // as we find common service names that are listening on a - // commonly used port, we can add them here - nameMap = map[string]ServiceType{} ) // Detect returns the ServiceType from the provided process information. -func Detect(name string, ports []uint16) ServiceType { +func Detect(ports []uint16) ServiceType { // start with ports for _, v := range ports { if st, ok := portMap[v]; ok { @@ -100,10 +95,6 @@ func Detect(name string, ports []uint16) ServiceType { } } - // next check name - if st, ok := nameMap[name]; ok { - return st - } // anything else is a webservice return WebService diff --git a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go index edbff8dd73934..e895bea26c9bf 100644 --- a/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go +++ b/pkg/collector/corechecks/servicediscovery/servicetype/servicetype_test.go @@ -1,7 +1,7 @@ // Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. +// Copyright 2024-present Datadog, Inc. package servicetype_test @@ -14,44 +14,38 @@ import ( func TestDetect(t *testing.T) { data := []struct { name string - serviceName string ports []uint16 serviceType servicetype.ServiceType }{ { name: "redis", - serviceName: "redis", ports: []uint16{9443}, serviceType: servicetype.DB, }, { name: "mongo", - serviceName: "mongo", ports: []uint16{27017, 27018, 27019, 27020}, serviceType: servicetype.DB, }, { name: "elastic", - serviceName: "elastic", ports: []uint16{9200}, serviceType: servicetype.Storage, }, { name: "web", - serviceName: "apache", ports: []uint16{80}, serviceType: servicetype.FrontEnd, }, { name: "internal", - serviceName: "myService", ports: []uint16{8080}, serviceType: servicetype.WebService, }, } for _, d := range data { t.Run(d.name, func(t *testing.T) { - serviceType := servicetype.Detect(d.serviceName, d.ports) + serviceType := servicetype.Detect(d.ports) if serviceType != d.serviceType { t.Errorf("expected %v, got %v", d.serviceType, serviceType) } From 5bef8c65942618d9001400354f1280e5613b0b33 Mon Sep 17 00:00:00 2001 From: maxime mouial Date: Thu, 5 Sep 2024 11:16:14 +0200 Subject: [PATCH 021/128] Adding comment to flare provider to prevent people from adding more (#29037) --- comp/core/flare/flare.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comp/core/flare/flare.go b/comp/core/flare/flare.go index 2b0e249774101..6340f1b2f4934 100644 --- a/comp/core/flare/flare.go +++ b/comp/core/flare/flare.go @@ -176,6 +176,9 @@ func (f *flare) Create(pdata ProfileData, ipcError error) (string, error) { } // Adding legacy and internal providers. Registering then as Provider through FX create cycle dependencies. + // + // Do not extend this list, this is legacy behavior that should be remove at some point. 
To add data to a flare + // use the flare provider system: https://datadoghq.dev/datadog-agent/components/shared_features/flares/ providers := append( f.providers, func(fb types.FlareBuilder) error { From 7422804593ad33f712c199adf958a2ae1fd42664 Mon Sep 17 00:00:00 2001 From: Thibaud Cheruy Date: Thu, 5 Sep 2024 11:57:36 +0200 Subject: [PATCH 022/128] [NDM][Cisco SD-WAN] Increase default statistics lookup interval to 30 mins (#28951) --- .../network-devices/cisco-sdwan/client/client.go | 2 +- .../network-devices/cisco-sdwan/client/client_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go index 43967280a41ae..636a599ead969 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go @@ -24,7 +24,7 @@ const ( defaultMaxAttempts = 3 defaultMaxPages = 100 defaultMaxCount = "2000" - defaultLookback = 10 * time.Minute + defaultLookback = 30 * time.Minute defaultHTTPTimeout = 10 defaultHTTPScheme = "https" ) diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go index 62470f296e97d..1a09f467c5174 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client_test.go @@ -273,7 +273,7 @@ func TestGetInterfacesMetrics(t *testing.T) { require.Equal(t, "2000", count) require.Equal(t, "UTC", timeZone) - require.Equal(t, "1999-12-31T23:50:00", startDate) + require.Equal(t, "1999-12-31T23:30:00", startDate) require.Equal(t, "2000-01-01T00:00:00", endDate) w.WriteHeader(http.StatusOK) @@ -322,7 +322,7 @@ func TestGetDeviceHardwareMetrics(t *testing.T) { require.Equal(t, "2000", count) require.Equal(t, "UTC", timeZone) - require.Equal(t, "1999-12-31T23:50:00", startDate) + require.Equal(t, "1999-12-31T23:30:00", startDate) require.Equal(t, "2000-01-01T00:00:00", endDate) w.WriteHeader(http.StatusOK) @@ -365,7 +365,7 @@ func TestGetApplicationAwareRoutingMetrics(t *testing.T) { require.Equal(t, "2000", count) require.Equal(t, "UTC", timeZone) - require.Equal(t, "1999-12-31T23:50:00", startDate) + require.Equal(t, "1999-12-31T23:30:00", startDate) require.Equal(t, "2000-01-01T00:00:00", endDate) w.WriteHeader(http.StatusOK) @@ -552,7 +552,7 @@ func TestGetCloudExpressMetrics(t *testing.T) { require.Equal(t, "2000", count) require.Equal(t, "UTC", timeZone) - require.Equal(t, "1999-12-31T23:50:00", startDate) + require.Equal(t, "1999-12-31T23:30:00", startDate) require.Equal(t, "2000-01-01T00:00:00", endDate) w.WriteHeader(http.StatusOK) From a39ad23b8a677949ade2e8c4cf65e388a723296a Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Thu, 5 Sep 2024 12:12:15 +0200 Subject: [PATCH 023/128] [CWS] do not approve with zero flags entry (#29050) --- .../ebpf/c/include/helpers/approvers.h | 95 +++++++++++++------ pkg/security/ebpf/c/include/maps.h | 16 ++-- pkg/security/ebpf/c/include/structs/filter.h | 10 ++ pkg/security/ebpf/map.go | 40 ++++++++ pkg/security/probe/kfilters/approvers.go | 45 +++++---- pkg/security/probe/kfilters/bpf.go | 2 +- pkg/security/probe/kfilters/mmap.go | 4 +- pkg/security/probe/kfilters/mprotect.go | 4 +- pkg/security/probe/kfilters/open.go | 2 +- pkg/security/probe/kfilters/splice.go | 4 +- 
pkg/security/tests/filters_test.go | 10 ++ 11 files changed, 170 insertions(+), 62 deletions(-) diff --git a/pkg/security/ebpf/c/include/helpers/approvers.h b/pkg/security/ebpf/c/include/helpers/approvers.h index dedc9b36772dd..02c10694c7a19 100644 --- a/pkg/security/ebpf/c/include/helpers/approvers.h +++ b/pkg/security/ebpf/c/include/helpers/approvers.h @@ -49,10 +49,26 @@ int __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *sysca return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHOWN); } -int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t *syscall) { +int __attribute__((always_inline)) lookup_u32_flags(void *map, u32 *flags) { u32 key = 0; - u32 *flags = bpf_map_lookup_elem(&mmap_flags_approvers, &key); - if (flags != NULL && (syscall->mmap.flags & *flags) > 0) { + struct u32_flags_filter_t *filter = bpf_map_lookup_elem(map, &key); + if (filter == NULL || !filter->is_set) { + return 0; + } + *flags = filter->flags; + + return 1; +} + +int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t *syscall) { + u32 flags = 0; + + int exists = lookup_u32_flags(&mmap_flags_approvers, &flags); + if (!exists) { + return 0; + } + + if ((syscall->mmap.flags & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; } @@ -60,12 +76,13 @@ int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t } int __attribute__((always_inline)) approve_mmap_by_protection(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags_ptr = bpf_map_lookup_elem(&mmap_protection_approvers, &key); - if (flags_ptr == NULL) { + u32 flags = 0; + + int exists = lookup_u32_flags(&mmap_protection_approvers, &flags); + if (!exists) { return 0; } - u32 flags = *flags_ptr; + if ((flags == 0 && syscall->mmap.protection == 0) || (syscall->mmap.protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; @@ -104,9 +121,14 @@ int __attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *sysca } int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags = bpf_map_lookup_elem(&mprotect_vm_protection_approvers, &key); - if (flags != NULL && (syscall->mprotect.vm_protection & *flags) > 0) { + u32 flags = 0; + + int exists = lookup_u32_flags(&mprotect_vm_protection_approvers, &flags); + if (!exists) { + return 0; + } + + if ((syscall->mprotect.vm_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; } @@ -114,9 +136,14 @@ int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct sysc } int __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags = bpf_map_lookup_elem(&mprotect_req_protection_approvers, &key); - if (flags != NULL && (syscall->mprotect.req_protection & *flags) > 0) { + u32 flags = 0; + + int exists = lookup_u32_flags(&mprotect_req_protection_approvers, &flags); + if (!exists) { + return 0; + } + + if ((syscall->mprotect.req_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; } @@ -133,13 +160,13 @@ int __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *sy } int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags_ptr = bpf_map_lookup_elem(&open_flags_approvers, &key); - if (flags_ptr == NULL) { + u32 flags = 0; + + 
int exists = lookup_u32_flags(&open_flags_approvers, &flags); + if (!exists) { return 0; } - u32 flags = *flags_ptr; if ((flags == 0 && syscall->open.flags == 0) || ((syscall->open.flags & flags) > 0)) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); @@ -171,9 +198,14 @@ int __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *sysca } int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags = bpf_map_lookup_elem(&splice_entry_flags_approvers, &key); - if (flags != NULL && (syscall->splice.pipe_entry_flag & *flags) > 0) { + u32 flags = 0; + + int exists = lookup_u32_flags(&splice_entry_flags_approvers, &flags); + if (!exists) { + return 0; + } + + if ((syscall->splice.pipe_entry_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; } @@ -181,9 +213,14 @@ int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_ } int __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) { - u32 key = 0; - u32 *flags = bpf_map_lookup_elem(&splice_exit_flags_approvers, &key); - if (flags != NULL && (syscall->splice.pipe_exit_flag & *flags) > 0) { + u32 flags = 0; + + int exists = lookup_u32_flags(&splice_exit_flags_approvers, &flags); + if (!exists) { + return 0; + } + + if ((syscall->splice.pipe_exit_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); return 1; } @@ -216,16 +253,18 @@ int __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *sysca } int __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; u32 key = 0; + struct u64_flags_filter_t *filter = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); + if (filter == NULL || !filter->is_set) { + return 0; + } - u64 *cmd_bitmask = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); - if (cmd_bitmask != NULL && ((1 << syscall->bpf.cmd) & *cmd_bitmask) > 0) { + if (((1 << syscall->bpf.cmd) & filter->flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - pass_to_userspace = 1; + return 1; } - return pass_to_userspace; + return 0; } #endif diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index d472b54d64627..91cca71ffe8ec 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -14,15 +14,15 @@ BPF_ARRAY_MAP(dr_erpc_buffer, char[DR_ERPC_BUFFER_LENGTH * 2], 1) BPF_ARRAY_MAP(inode_disc_revisions, u32, REVISION_ARRAY_SIZE) BPF_ARRAY_MAP(discarders_revision, u32, 1) BPF_ARRAY_MAP(filter_policy, struct policy_t, EVENT_MAX) -BPF_ARRAY_MAP(mmap_flags_approvers, u32, 1) -BPF_ARRAY_MAP(mmap_protection_approvers, u32, 1) -BPF_ARRAY_MAP(mprotect_vm_protection_approvers, u32, 1) -BPF_ARRAY_MAP(mprotect_req_protection_approvers, u32, 1) -BPF_ARRAY_MAP(open_flags_approvers, u32, 1) +BPF_ARRAY_MAP(mmap_flags_approvers, struct u32_flags_filter_t, 1) +BPF_ARRAY_MAP(mmap_protection_approvers, struct u32_flags_filter_t, 1) +BPF_ARRAY_MAP(mprotect_vm_protection_approvers, struct u32_flags_filter_t, 1) +BPF_ARRAY_MAP(mprotect_req_protection_approvers, struct u32_flags_filter_t, 1) +BPF_ARRAY_MAP(open_flags_approvers, struct u32_flags_filter_t, 1) BPF_ARRAY_MAP(selinux_enforce_status, u16, 2) -BPF_ARRAY_MAP(splice_entry_flags_approvers, u32, 1) -BPF_ARRAY_MAP(splice_exit_flags_approvers, u32, 1) -BPF_ARRAY_MAP(bpf_cmd_approvers, u64, 1) +BPF_ARRAY_MAP(splice_entry_flags_approvers, struct 
u32_flags_filter_t, 1) +BPF_ARRAY_MAP(splice_exit_flags_approvers, struct u32_flags_filter_t, 1) +BPF_ARRAY_MAP(bpf_cmd_approvers, struct u64_flags_filter_t, 1) BPF_ARRAY_MAP(syscalls_stats_enabled, u32, 1) BPF_ARRAY_MAP(syscall_ctx_gen_id, u32, 1) BPF_ARRAY_MAP(syscall_ctx, char[MAX_SYSCALL_CTX_SIZE], MAX_SYSCALL_CTX_ENTRIES) diff --git a/pkg/security/ebpf/c/include/structs/filter.h b/pkg/security/ebpf/c/include/structs/filter.h index 724331aa84841..9d876d0a89ef9 100644 --- a/pkg/security/ebpf/c/include/structs/filter.h +++ b/pkg/security/ebpf/c/include/structs/filter.h @@ -24,6 +24,16 @@ struct basename_filter_t { u64 event_mask; }; +struct u32_flags_filter_t { + u32 flags; + u8 is_set; +}; + +struct u64_flags_filter_t { + u64 flags; + u8 is_set; +}; + // Discarders struct discarder_stats_t { diff --git a/pkg/security/ebpf/map.go b/pkg/security/ebpf/map.go index 27973d376b00a..99e39dff3dbf5 100644 --- a/pkg/security/ebpf/map.go +++ b/pkg/security/ebpf/map.go @@ -86,6 +86,46 @@ func NewStringMapItem(str string, size int) *StringMapItem { return &StringMapItem{str: str, size: size} } +// Uint32FlagsZeroMapItem value used to reset the map entry +var Uint32FlagsZeroMapItem = make([]byte, 8) + +// Uint32FlagsMapItem describes an flags table key or value +type Uint32FlagsMapItem uint32 + +// MarshalBinary returns the binary representation of a Uint32FlagsMapItem +func (i *Uint32FlagsMapItem) MarshalBinary() ([]byte, error) { + b := make([]byte, 8) + binary.NativeEndian.PutUint32(b, uint32(*i)) + b[4] = 1 + return b, nil +} + +// NewUint32FlagsMapItem returns a new Uint32FlagsMapItem +func NewUint32FlagsMapItem(i uint32) *Uint32FlagsMapItem { + item := Uint32FlagsMapItem(i) + return &item +} + +// Uint64FlagsZeroMapItem value used to reset the map entry +var Uint64FlagsZeroMapItem = make([]byte, 16) + +// Uint64FlagsMapItem describes an flags table key or value +type Uint64FlagsMapItem uint64 + +// MarshalBinary returns the binary representation of a Uint64FlagsMapItem +func (i *Uint64FlagsMapItem) MarshalBinary() ([]byte, error) { + b := make([]byte, 16) + binary.NativeEndian.PutUint64(b, uint64(*i)) + b[8] = 1 + return b, nil +} + +// NewUint64FlagsMapItem returns a new Uint64FlagsMapItem +func NewUint64FlagsMapItem(i uint64) *Uint64FlagsMapItem { + item := Uint64FlagsMapItem(i) + return &item +} + // Zero table items var ( ZeroUint8MapItem = BytesMapItem([]byte{0}) diff --git a/pkg/security/probe/kfilters/approvers.go b/pkg/security/probe/kfilters/approvers.go index 4da7b1cfb93a1..f902b9ccd0e83 100644 --- a/pkg/security/probe/kfilters/approvers.go +++ b/pkg/security/probe/kfilters/approvers.go @@ -44,7 +44,7 @@ func newBasenameKFilters(tableName string, eventType model.EventType, basenames return approvers, nil } -func intValues[I int32 | int64](fvs rules.FilterValues) []I { +func uintValues[I uint32 | uint64](fvs rules.FilterValues) []I { var values []I for _, v := range fvs { values = append(values, I(v.Value.(int))) @@ -52,35 +52,44 @@ func intValues[I int32 | int64](fvs rules.FilterValues) []I { return values } -func newKFilterWithFlags[I int32 | int64](tableName string, flags ...I) (activeKFilter, error) { - var flagsItem I - +func newKFilterWithUInt32Flags(tableName string, flags ...uint32) (activeKFilter, error) { + var bitmask uint32 for _, flag := range flags { - flagsItem |= flag + bitmask |= flag } - if flagsItem != 0 { - return &arrayEntry{ - tableName: tableName, - index: uint32(0), - value: flagsItem, - zeroValue: I(0), - }, nil + return &arrayEntry{ + tableName: tableName, + 
index: uint32(0), + value: ebpf.NewUint32FlagsMapItem(bitmask), + zeroValue: ebpf.Uint32FlagsZeroMapItem, + }, nil +} + +func newKFilterWithUInt64Flags(tableName string, flags ...uint64) (activeKFilter, error) { + var bitmask uint64 + for _, flag := range flags { + bitmask |= flag } - return nil, nil + return &arrayEntry{ + tableName: tableName, + index: uint32(0), + value: ebpf.NewUint64FlagsMapItem(bitmask), + zeroValue: ebpf.Uint64FlagsZeroMapItem, + }, nil } -func getFlagsKFilters(tableName string, flags ...int32) (activeKFilter, error) { - return newKFilterWithFlags(tableName, flags...) +func getFlagsKFilter(tableName string, flags ...uint32) (activeKFilter, error) { + return newKFilterWithUInt32Flags(tableName, flags...) } -func getEnumsKFilters(tableName string, enums ...int64) (activeKFilter, error) { - var flags []int64 +func getEnumsKFilters(tableName string, enums ...uint64) (activeKFilter, error) { + var flags []uint64 for _, enum := range enums { flags = append(flags, 1< Date: Thu, 5 Sep 2024 13:22:28 +0200 Subject: [PATCH 024/128] [CWS] Use a single TTL-based cache implementation (#29074) --- LICENSE-3rdparty.csv | 1 - go.mod | 2 +- go.sum | 4 +- pkg/security/secl/compiler/eval/variables.go | 53 ++++++++++++-------- pkg/security/secl/go.mod | 1 + pkg/security/secl/go.sum | 4 ++ pkg/security/secl/rules/policy_test.go | 4 +- pkg/security/seclwin/go.mod | 3 +- pkg/security/seclwin/go.sum | 8 ++- 9 files changed, 51 insertions(+), 29 deletions(-) diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 2ebca5bc6d7b6..219896d013391 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1289,7 +1289,6 @@ core,github.com/hashicorp/go-sockaddr,MPL-2.0,"Copyright © 2014-2018 HashiCorp, core,github.com/hashicorp/go-version,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/golang-lru/simplelru,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/golang-lru/v2,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" -core,github.com/hashicorp/golang-lru/v2/expirable,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/golang-lru/v2/internal,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/golang-lru/v2/simplelru,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/hcl,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" diff --git a/go.mod b/go.mod index 77c67a228ff17..84cece8bc97a7 100644 --- a/go.mod +++ b/go.mod @@ -602,7 +602,7 @@ require ( github.com/containerd/containerd/api v1.7.19 github.com/containerd/errdefs v0.1.0 github.com/distribution/reference v0.6.0 - github.com/jellydator/ttlcache/v3 v3.0.1 + github.com/jellydator/ttlcache/v3 v3.3.0 github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649 github.com/lorenzosaino/go-sysctl v0.3.1 ) diff --git a/go.sum b/go.sum index 20463da9250a2..a310d674a2d1f 100644 --- a/go.sum +++ b/go.sum @@ -1741,8 +1741,8 @@ github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInw github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU= -github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4= +github.com/jellydator/ttlcache/v3 
v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= diff --git a/pkg/security/secl/compiler/eval/variables.go b/pkg/security/secl/compiler/eval/variables.go index 37802d9ed2649..3d0d865054276 100644 --- a/pkg/security/secl/compiler/eval/variables.go +++ b/pkg/security/secl/compiler/eval/variables.go @@ -13,7 +13,7 @@ import ( "regexp" "time" - "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/jellydator/ttlcache/v3" ) var ( @@ -304,7 +304,7 @@ func NewMutableStringVariable() *MutableStringVariable { // MutableStringArrayVariable describes a mutable string array variable type MutableStringArrayVariable struct { - *expirable.LRU[string, bool] + LRU *ttlcache.Cache[string, bool] } // Set the variable with the specified value @@ -314,7 +314,7 @@ func (m *MutableStringArrayVariable) Set(_ *Context, values interface{}) error { } for _, v := range values.([]string) { - m.LRU.Add(v, true) + m.LRU.Set(v, true, ttlcache.DefaultTTL) } return nil } @@ -323,10 +323,10 @@ func (m *MutableStringArrayVariable) Set(_ *Context, values interface{}) error { func (m *MutableStringArrayVariable) Append(_ *Context, value interface{}) error { switch value := value.(type) { case string: - m.LRU.Add(value, true) + m.LRU.Set(value, true, ttlcache.DefaultTTL) case []string: for _, v := range value { - m.LRU.Add(v, true) + m.LRU.Set(v, true, ttlcache.DefaultTTL) } default: return errAppendNotSupported @@ -349,7 +349,9 @@ func NewMutableStringArrayVariable(size int, ttl time.Duration) *MutableStringAr size = defaultMaxVariables } - lru := expirable.NewLRU[string, bool](size, nil, ttl) + lru := ttlcache.New(ttlcache.WithCapacity[string, bool](uint64(size)), ttlcache.WithTTL[string, bool](ttl)) + go lru.Start() + return &MutableStringArrayVariable{ LRU: lru, } @@ -433,7 +435,7 @@ func (v *GlobalVariables) GetVariable(_ string, value interface{}, opts Variable // Variables holds a set of variables type Variables struct { - lru *expirable.LRU[string, interface{}] + lru *ttlcache.Cache[string, interface{}] ttl time.Duration } @@ -444,36 +446,46 @@ func NewVariables() *Variables { // GetBool returns the boolean value of the specified variable func (v *Variables) GetBool(name string) bool { - value, _ := v.lru.Get(name) - bval, _ := value.(bool) + var bval bool + if item := v.lru.Get(name); item != nil { + bval, _ = item.Value().(bool) + } return bval } // GetInt returns the integer value of the specified variable func (v *Variables) GetInt(name string) int { - value, _ := v.lru.Get(name) - ival, _ := value.(int) + var ival int + if item := v.lru.Get(name); item != nil { + ival, _ = item.Value().(int) + } return ival } // GetString returns the string value of the specified variable func (v *Variables) GetString(name string) string { - value, _ := v.lru.Get(name) - sval, _ := value.(string) + var sval string + if item := v.lru.Get(name); item != nil { + sval, _ = item.Value().(string) + } return sval } // GetStringArray returns the string array value of the specified variable func (v *Variables) GetStringArray(name string) []string { - value, _ := v.lru.Get(name) - slval, _ := value.([]string) + var slval []string + if item := 
v.lru.Get(name); item != nil { + slval, _ = item.Value().([]string) + } return slval } // GetIntArray returns the integer array value of the specified variable func (v *Variables) GetIntArray(name string) []int { - value, _ := v.lru.Get(name) - ilval, _ := value.([]int) + var ilval []int + if item := v.lru.Get(name); item != nil { + ilval, _ = item.Value().([]int) + } return ilval } @@ -483,12 +495,13 @@ const defaultMaxVariables = 100 func (v *Variables) Set(name string, value interface{}) bool { existed := false if v.lru == nil { - v.lru = expirable.NewLRU[string, interface{}](defaultMaxVariables, nil, v.ttl) + v.lru = ttlcache.New(ttlcache.WithCapacity[string, interface{}](uint64(defaultMaxVariables)), ttlcache.WithTTL[string, interface{}](v.ttl)) + go v.lru.Start() } else { - _, existed = v.lru.Get(name) + existed = v.lru.Get(name) != nil } - v.lru.Add(name, value) + v.lru.Set(name, value, ttlcache.DefaultTTL) return !existed } diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index b4da8135a20ad..22d7931b1557d 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -12,6 +12,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/jellydator/ttlcache/v3 v3.3.0 github.com/skydive-project/go-debouncer v1.0.0 github.com/spf13/cast v1.7.0 github.com/stretchr/testify v1.9.0 diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index b0fa6b12c6903..d111f0dc627d3 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -30,6 +30,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -57,6 +59,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index 8b7e44ebb734b..112001e8aeea3 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -342,9 +342,9 @@ func TestActionSetVariableTTL(t *testing.T) { assert.NotNil(t, stringArrayVar) assert.True(t, ok) - assert.True(t, stringArrayVar.Contains("foo")) + assert.True(t, 
stringArrayVar.LRU.Has("foo")) time.Sleep(time.Second + 100*time.Millisecond) - assert.False(t, stringArrayVar.Contains("foo")) + assert.False(t, stringArrayVar.LRU.Has("foo")) } func TestActionSetVariableConflict(t *testing.T) { diff --git a/pkg/security/seclwin/go.mod b/pkg/security/seclwin/go.mod index 87d480ed5940e..dc91e07840f51 100644 --- a/pkg/security/seclwin/go.mod +++ b/pkg/security/seclwin/go.mod @@ -11,6 +11,7 @@ require ( require ( github.com/alecthomas/participle v0.7.1 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/jellydator/ttlcache/v3 v3.3.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + golang.org/x/sync v0.8.0 // indirect ) diff --git a/pkg/security/seclwin/go.sum b/pkg/security/seclwin/go.sum index c8f0a2d739e33..39492baacd3ec 100644 --- a/pkg/security/seclwin/go.sum +++ b/pkg/security/seclwin/go.sum @@ -4,8 +4,8 @@ github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1p github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -14,6 +14,10 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 19b9e3340a3a6a25474bf6e96cefed4694a42ab1 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Thu, 5 Sep 2024 15:08:08 +0300 Subject: [PATCH 025/128] [USMON-417] gotls: Restore support on fedora (#27437) --- .../ebpf/c/protocols/tls/go-tls-maps.h | 4 + pkg/network/ebpf/c/protocols/tls/https.h | 46 +++--------- .../protocols/tls/gotls/testutil/helpers.go | 27 +------ pkg/network/usm/ebpf_gotls.go | 73 ++++++++++++++----- pkg/network/usm/kafka_monitor_test.go | 6 +- pkg/network/usm/monitor_tls_test.go | 12 ++- pkg/network/usm/postgres_monitor_test.go | 2 +- .../usm/tests/tracer_usm_linux_test.go | 4 +- pkg/network/usm/usm_grpc_monitor_test.go | 2 +- 
pkg/network/usm/usm_http2_monitor_test.go | 2 +- 10 files changed, 83 insertions(+), 95 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h index 1c66dee1d0abc..58f3ce3ac771f 100644 --- a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h +++ b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h @@ -9,6 +9,10 @@ // offsets_data map contains the information about the locations of structs in the inspected binary, mapped by the binary's inode number. BPF_HASH_MAP(offsets_data, go_tls_offsets_data_key_t, tls_offsets_data_t, 1024) +// Maps PID to the - tuple, that is used to find the offsets_data map for the binary. +// Size is a 10 times the size of the offsets_data map, to have enough space for all the binaries. +BPF_HASH_MAP(pid_to_device_inode, u32, go_tls_offsets_data_key_t, 10240) + /* go_tls_read_args is used to get the read function info when running in the read-return uprobe. The key contains the go routine id and the pid. */ BPF_LRU_MAP(go_tls_read_args, go_tls_function_args_key_t, go_tls_read_args_data_t, 2048) diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index db485ab14403c..ccb9550f3602c 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -3,10 +3,6 @@ #ifdef COMPILE_CORE #include "ktypes.h" -#define MINORBITS 20 -#define MINORMASK ((1U << MINORBITS) - 1) -#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) -#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #else #include #include @@ -287,41 +283,19 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) { bpf_map_update_with_telemetry(ssl_sock_by_ctx, &ssl_ctx, &ssl_sock, BPF_ANY); } -/** - * get_offsets_data retrieves the result of binary analysis for the - * current task binary's inode number. - */ -static __always_inline tls_offsets_data_t* get_offsets_data() { - struct task_struct *t = (struct task_struct *) bpf_get_current_task(); - struct inode *inode; - go_tls_offsets_data_key_t key; - dev_t dev_id; - - inode = BPF_CORE_READ(t, mm, exe_file, f_inode); - if (!inode) { - log_debug("get_offsets_data: could not read f_inode field"); - return NULL; - } - - int err; - err = BPF_CORE_READ_INTO(&key.ino, inode, i_ino); - if (err) { - log_debug("get_offsets_data: could not read i_ino field"); - return NULL; - } - err = BPF_CORE_READ_INTO(&dev_id, inode, i_sb, s_dev); - if (err) { - log_debug("get_offsets_data: could not read s_dev field"); +// Retrieves the result of binary analysis for the current task binary's inode number. +// For the current PID, we retrieve the inode number of the binary and then we look up the binary's analysis result. 
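// An illustrative sketch of the user-space half that this lookup assumes: the Go changes later in
// this same patch keep pid_to_device_inode in sync, writing a key built from the stat()-derived
// device and inode (names taken from that code, shown here only for context):
//
//     key := gotls.TlsBinaryId{
//         Id_major: unix.Major(filePath.ID.Dev),
//         Id_minor: unix.Minor(filePath.ID.Dev),
//         Ino:      filePath.ID.Inode,
//     }
//     _ = pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(&key))
//
// With that mapping maintained from user space, the kernel function below only needs two map
// lookups and no longer walks task->mm->exe_file or re-derives the device id with MAJOR()/MINOR().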
+static __always_inline tls_offsets_data_t* get_offsets_data() { + u64 pid_tgid = bpf_get_current_pid_tgid(); + u32 pid = pid_tgid >> 32; + go_tls_offsets_data_key_t *key = bpf_map_lookup_elem(&pid_to_device_inode, &pid); + if (key == NULL) { + log_debug("get_offsets_data: could not find key for pid %u", pid); return NULL; } - - key.device_id_major = MAJOR(dev_id); - key.device_id_minor = MINOR(dev_id); - - log_debug("get_offsets_data: task binary inode number: %llu; device ID %x:%x", key.ino, key.device_id_major, key.device_id_minor); - - return bpf_map_lookup_elem(&offsets_data, &key); + go_tls_offsets_data_key_t key_copy = *key; + return bpf_map_lookup_elem(&offsets_data, &key_copy); } #endif diff --git a/pkg/network/protocols/tls/gotls/testutil/helpers.go b/pkg/network/protocols/tls/gotls/testutil/helpers.go index 2315779aa184a..fcf418ae505a9 100644 --- a/pkg/network/protocols/tls/gotls/testutil/helpers.go +++ b/pkg/network/protocols/tls/gotls/testutil/helpers.go @@ -8,34 +8,11 @@ package testutil import ( - "slices" - "testing" - - "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/network/config" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" - "github.com/DataDog/datadog-agent/pkg/util/kernel" -) - -const ( - fedoraPlatform = "fedora" ) -var fedoraUnsupportedVersions = []string{"35", "36", "37", "38"} - -// isFedora returns true if the current OS is Fedora. -// go-tls does not work correctly on Fedora 35, 36, 37 and 38. -func isFedora(t *testing.T) bool { - platform, err := kernel.Platform() - require.NoError(t, err) - platformVersion, err := kernel.PlatformVersion() - require.NoError(t, err) - - return platform == fedoraPlatform && slices.Contains(fedoraUnsupportedVersions, platformVersion) -} - // GoTLSSupported returns true if GO-TLS monitoring is supported on the current OS. -func GoTLSSupported(t *testing.T, cfg *config.Config) bool { - return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) && !isFedora(t) +func GoTLSSupported(cfg *config.Config) bool { + return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) } diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index e0090e3ba889c..b7e75f7c3c964 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -42,6 +42,7 @@ import ( const ( offsetsDataMap = "offsets_data" + pidToDeviceInodeMap = "pid_to_device_inode" goTLSReadArgsMap = "go_tls_read_args" goTLSWriteArgsMap = "go_tls_write_args" connectionTupleByGoTLSMap = "conn_tup_by_go_tls_conn" @@ -115,6 +116,11 @@ type goTLSProgram struct { // inodes. offsetsDataMap *ebpf.Map + // eBPF map holding the mapping of PIDs to device/inode numbers. + // On some filesystems (like btrfs), the device-id in the task-struct can be different from the device-id extracted + // in the user-mode. This map is used to ensure the eBPF probes are getting the correct device/inode numbers. 
+ pidToDeviceInodeMap *ebpf.Map + // binAnalysisMetric handles telemetry on the time spent doing binary // analysis binAnalysisMetric *libtelemetry.Counter @@ -131,6 +137,7 @@ var _ utils.Attacher = &goTLSProgram{} var goTLSSpec = &protocols.ProtocolSpec{ Maps: []*manager.Map{ {Name: offsetsDataMap}, + {Name: pidToDeviceInodeMap}, {Name: goTLSReadArgsMap}, {Name: goTLSWriteArgsMap}, {Name: connectionTupleByGoTLSMap}, @@ -216,6 +223,10 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { if err != nil { return fmt.Errorf("could not get offsets_data map: %s", err) } + p.pidToDeviceInodeMap, _, err = m.GetMap(pidToDeviceInodeMap) + if err != nil { + return fmt.Errorf("could not get %s map: %s", pidToDeviceInodeMap, err) + } procMonitor := monitor.GetProcessMonitor() cleanupExec := procMonitor.SubscribeExec(p.handleProcessStart) @@ -242,7 +253,7 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { processSet := p.registry.GetRegisteredProcesses() deletedPids := monitor.FindDeletedProcesses(processSet) for deletedPid := range deletedPids { - _ = p.registry.Unregister(deletedPid) + _ = p.DetachPID(deletedPid) } } } @@ -278,6 +289,7 @@ var ( // DetachPID detaches the provided PID from the eBPF program. func (p *goTLSProgram) DetachPID(pid uint32) error { + _ = p.pidToDeviceInodeMap.Delete(unsafe.Pointer(&pid)) return p.registry.Unregister(pid) } @@ -338,12 +350,13 @@ func (p *goTLSProgram) AttachPID(pid uint32) error { // Check go process probeList := make([]manager.ProbeIdentificationPair, 0) - return p.registry.Register(binPath, pid, registerCBCreator(p.manager, p.offsetsDataMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), - unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap), - utils.IgnoreCB) + return p.registry.Register(binPath, pid, + registerCBCreator(p.manager, p.offsetsDataMap, p.pidToDeviceInodeMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), + unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap, p.pidToDeviceInodeMap), + alreadyCBCreator(p.pidToDeviceInodeMap)) } -func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { +func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { return func(filePath utils.FilePath) error { start := time.Now() @@ -366,13 +379,13 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs return fmt.Errorf("error extracting inspectoin data from %s: %w", filePath.HostPath, err) } - if err := addInspectionResultToMap(offsetsDataMap, filePath.ID, inspectionResult); err != nil { + if err := addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap, filePath, inspectionResult); err != nil { return fmt.Errorf("failed adding inspection rules: %w", err) } pIDs, err := attachHooks(mgr, inspectionResult, filePath.HostPath, filePath.ID) if err != nil { - removeInspectionResultFromMap(offsetsDataMap, filePath.ID) + removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, filePath) return fmt.Errorf("error while attaching hooks to %s: %w", filePath.HostPath, err) } *probeIDs = pIDs @@ -385,6 +398,21 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs } } +// alreadyCBCreator handles the case where a binary is already 
registered. In such a case the registry callback won't +// be called, so we need to add a mapping from the PID to the device/inode of the binary. +func alreadyCBCreator(pidToDeviceInodeMap *ebpf.Map) func(utils.FilePath) error { + return func(filePath utils.FilePath) error { + if filePath.PID == 0 { + return nil + } + return pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(&gotls.TlsBinaryId{ + Id_major: unix.Major(filePath.ID.Dev), + Id_minor: unix.Minor(filePath.ID.Dev), + Ino: filePath.ID.Inode, + })) + } +} + func (p *goTLSProgram) handleProcessExit(pid pid) { _ = p.DetachPID(pid) } @@ -395,32 +423,39 @@ func (p *goTLSProgram) handleProcessStart(pid pid) { // addInspectionResultToMap runs a binary inspection and adds the result to the // map that's being read by the probes, indexed by the binary's inode number `ino`. -func addInspectionResultToMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier, result *bininspect.Result) error { +func addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath, result *bininspect.Result) error { offsetsData, err := inspectionResultToProbeData(result) if err != nil { return fmt.Errorf("error while parsing inspection result: %w", err) } key := &gotls.TlsBinaryId{ - Id_major: unix.Major(binID.Dev), - Id_minor: unix.Minor(binID.Dev), - Ino: binID.Inode, + Id_major: unix.Major(filePath.ID.Dev), + Id_minor: unix.Minor(filePath.ID.Dev), + Ino: filePath.ID.Inode, } if err := offsetsDataMap.Put(unsafe.Pointer(key), unsafe.Pointer(&offsetsData)); err != nil { - return fmt.Errorf("could not write binary inspection result to map for binID %v: %w", binID, err) + return fmt.Errorf("could not write binary inspection result to map for binID %v (pid %v): %w", filePath.ID, filePath.PID, err) } + if err := pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(key)); err != nil { + return fmt.Errorf("could not write pid to device/inode (%s) map for pid %v: %w", filePath.ID.String(), filePath.PID, err) + } return nil } -func removeInspectionResultFromMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier) { +func removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath) { key := &gotls.TlsBinaryId{ - Id_major: unix.Major(binID.Dev), - Id_minor: unix.Minor(binID.Dev), - Ino: binID.Inode, + Id_major: unix.Major(filePath.ID.Dev), + Id_minor: unix.Minor(filePath.ID.Dev), + Ino: filePath.ID.Inode, + } + if filePath.PID != 0 { + _ = pidToDeviceInodeMap.Delete(unsafe.Pointer(&filePath.PID)) } if err := offsetsDataMap.Delete(unsafe.Pointer(key)); err != nil { - log.Errorf("could not remove inspection result from map for ino %v: %s", binID, err) + log.Errorf("could not remove inspection result from map for ino %v: %s", filePath.ID, err) + return } } @@ -475,12 +510,12 @@ func attachHooks(mgr *manager.Manager, result *bininspect.Result, binPath string return probeIDs, nil } -func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap *ebpf.Map) func(path utils.FilePath) error { +func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map) func(path utils.FilePath) error { return func(path utils.FilePath) error { if len(*probeIDs) == 0 { return nil } - removeInspectionResultFromMap(offsetsDataMap, path.ID) + removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, path) for _, probeID := range *probeIDs { err := 
mgr.DetachHook(probeID) if err != nil { diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index bbcedaecb2be6..c5ac048be3a80 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -148,7 +148,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProtocolParsing() { for mode, name := range map[bool]string{false: "without TLS", true: "with TLS"} { t.Run(name, func(t *testing.T) { - if mode && !gotlsutils.GoTLSSupported(t, config.New()) { + if mode && !gotlsutils.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } for _, version := range versions { @@ -1244,7 +1244,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaFetchRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(t, config.New()) { + if !gotlsutils.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -1470,7 +1470,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(t, config.New()) { + if !gotlsutils.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index c706b5fe874d4..b9df53c92dd16 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -548,11 +548,9 @@ func (s *tlsSuite) TestJavaInjection() { } func TestHTTPGoTLSAttachProbes(t *testing.T) { - t.Skip("skipping GoTLS tests while we investigate their flakiness") - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(t, config.New()) { + if !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -571,7 +569,7 @@ func testHTTP2GoTLSAttachProbes(t *testing.T, cfg *config.Config) { if !http2.Supported() { t.Skip("HTTP2 not supported for this setup") } - if !gotlstestutil.GoTLSSupported(t, cfg) { + if !gotlstestutil.GoTLSSupported(cfg) { t.Skip("GoTLS not supported for this setup") } @@ -603,7 +601,7 @@ func TestHTTPSGoTLSAttachProbesOnContainer(t *testing.T) { t.Skip("Skipping a flaky test") modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(t, config.New()) { + if !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -621,7 +619,7 @@ func TestOldConnectionRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(t, config.New()) { + if !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -696,7 +694,7 @@ func TestOldConnectionRegression(t *testing.T) { func TestLimitListenerRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(t, config.New()) { + if !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index a74ef203f3721..47a891cd4040b 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -137,7 
+137,7 @@ func (s *postgresProtocolParsingSuite) TestDecoding() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.isTLS && !gotlstestutil.GoTLSSupported(t, config.New()) { + if tt.isTLS && !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } testDecoding(t, tt.isTLS) diff --git a/pkg/network/usm/tests/tracer_usm_linux_test.go b/pkg/network/usm/tests/tracer_usm_linux_test.go index 2da79eadbf80a..9399e6b1cb210 100644 --- a/pkg/network/usm/tests/tracer_usm_linux_test.go +++ b/pkg/network/usm/tests/tracer_usm_linux_test.go @@ -118,7 +118,7 @@ func skipIfUsingNAT(t *testing.T, ctx testContext) { // skipIfGoTLSNotSupported skips the test if GoTLS is not supported. func skipIfGoTLSNotSupported(t *testing.T, _ testContext) { - if !gotlstestutil.GoTLSSupported(t, config.New()) { + if !gotlstestutil.GoTLSSupported(config.New()) { t.Skip("GoTLS is not supported") } } @@ -183,7 +183,7 @@ func (s *USMSuite) TestProtocolClassification() { cfg.EnableNativeTLSMonitoring = true cfg.EnableHTTPMonitoring = true cfg.EnablePostgresMonitoring = true - cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(t, cfg) + cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(cfg) cfg.BypassEnabled = true tr, err := tracer.NewTracer(cfg, nil) require.NoError(t, err) diff --git a/pkg/network/usm/usm_grpc_monitor_test.go b/pkg/network/usm/usm_grpc_monitor_test.go index 35ae34a9d46d4..73e3a5de28f2e 100644 --- a/pkg/network/usm/usm_grpc_monitor_test.go +++ b/pkg/network/usm/usm_grpc_monitor_test.go @@ -71,7 +71,7 @@ func TestGRPCScenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmGRPCSuite{isTLS: tc.isTLS}) diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index 46c3188b09833..825c842fe68cd 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -108,7 +108,7 @@ func TestHTTP2Scenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmHTTP2Suite{isTLS: tc.isTLS}) From d99d8f26ffce251265a8bd207c1feffb10ace0ce Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 12:19:03 +0000 Subject: [PATCH 026/128] [test-infra-definitions][automated] Bump test-infra-definitions to c9ee795ec7528d8044a39d436a1ab195b46b23e9 (#29073) Co-authored-by: KevinFairise2 --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/examples/aks_test.go | 4 +--- test/new-e2e/go.mod | 8 ++++---- test/new-e2e/go.sum | 16 ++++++++-------- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 9a29e393a30ce..6e55837cfb724 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: c0ecdf254c23 + 
TEST_INFRA_DEFINITIONS_BUILDIMAGES: c9ee795ec752 diff --git a/test/new-e2e/examples/aks_test.go b/test/new-e2e/examples/aks_test.go index 9b13ffb1fac02..4df8cfc7728c8 100644 --- a/test/new-e2e/examples/aks_test.go +++ b/test/new-e2e/examples/aks_test.go @@ -15,8 +15,6 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" azurekubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/azure/kubernetes" @@ -27,7 +25,7 @@ type aksSuite struct { } func TestAKSSuite(t *testing.T) { - e2e.Run(t, &aksSuite{}, e2e.WithProvisioner(azurekubernetes.AKSProvisioner(azurekubernetes.WithFakeIntakeOptions(fakeintake.WithDDDevForwarding())))) + e2e.Run(t, &aksSuite{}, e2e.WithProvisioner(azurekubernetes.AKSProvisioner())) } func (v *aksSuite) TestAKS() { diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 3eaa3216f47de..dd5a7b64ddc56 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -32,7 +32,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23 + github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752 github.com/aws/aws-sdk-go-v2 v1.30.4 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 @@ -50,7 +50,7 @@ require ( github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 - github.com/pulumi/pulumi/sdk/v3 v3.129.0 + github.com/pulumi/pulumi/sdk/v3 v3.130.0 github.com/samber/lo v1.47.0 github.com/sethvargo/go-retry v0.2.4 github.com/stretchr/testify v1.9.0 @@ -268,7 +268,7 @@ require github.com/hairyhenderson/go-codeowners v0.5.0 require ( github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 // indirect ) diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 2927dfa3597e9..b8993893b924a 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -14,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmi github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23 h1:43tVATnDYuG7xuAJAqvnre/3Hpbw+ZW6qoT3eTU1xbY= -github.com/DataDog/test-infra-definitions v0.0.0-20240828165228-c0ecdf254c23/go.mod h1:i/i/Wn96wSbD3nKDr9pMe/R8+4Q1qps51tOg8+1q3NI= +github.com/DataDog/test-infra-definitions 
v0.0.0-20240904143845-c9ee795ec752 h1:J+KnqV0jYvXvcN1LzRiRxYJo/nHGYsnBQ22VXIdAJD8= +github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752/go.mod h1:QEQPOdzBcxZly/1KtAPFgF1R7Tp98FajB06gZ75E+/U= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= @@ -405,12 +405,12 @@ github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 h1:0QxN2F/yiy github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0/go.mod h1:pv7oEJtA6Tn8dnE8/xya/yCQd6GU0Br9c9nHRkW9LiQ= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0 h1:TZoU7xP9qI3M6eunVmyPtfqxgf+egUzc7GXVIeJdp5A= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.51.0/go.mod h1:J/ZpHh7KUmcMiki7fnrCYlA4YIdr2pG7yTWdrwlnedY= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 h1:ijcCyi+SPlJn3aIEb4p23FTk6fxjPLtVMhfkRaKp85A= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0/go.mod h1:yQXpYXNeGVBcygd5Be/fzf+1Jcg4kDLAMZY6UDtIZvQ= github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 h1:joRRPeYxXSaCGF7we0NNAMsOy7HJFd7O4cWAjmKveRI= github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0/go.mod h1:XFiuqPmtOASRrKU1q29sgzAuq9OcZ0bDzdBw9TSUyyo= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0 h1:pPJMmpz7eIlnmqDPBJkJ1U27QuB/E7yfZgV6m8s1kmI= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.58.0/go.mod h1:M8QiZzL5yFrhRodptgpp8gZ9gLWEpqW7GkbWhMkvEzM= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 h1:1S0sh1N+9MV2eUOidjS1LA01eZ6x6j+I7G8CE0RBb8E= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0/go.mod h1:yVyaGAI0YLEcqfP/8Bmk9VgtRxE5JsBayO9i5QNneWg= github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0= github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ= @@ -425,8 +425,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.3 h1:nlN42MRSIuDh5Pc5nLq4b0lwZaX2ZU github.com/pulumi/pulumi-random/sdk/v4 v4.16.3/go.mod h1:yRfWJSLEAVZvkwgXajr3S9OmFkAZTxfO44Ef2HfixXQ= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng= -github.com/pulumi/pulumi/sdk/v3 v3.129.0 h1:uZpTTwWTx7Mk8UT9FgatzxzArim47vZ6hzNCKvgvX6A= -github.com/pulumi/pulumi/sdk/v3 v3.129.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= +github.com/pulumi/pulumi/sdk/v3 v3.130.0 h1:gGJNd+akPqhZ+vrsZmAjSNJn6kGJkitjjkwrmIQMmn8= +github.com/pulumi/pulumi/sdk/v3 v3.130.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= github.com/pulumiverse/pulumi-time/sdk v0.0.17 h1:JNYVLglXeMAjyD3upIwKZ9o7MnNo7kc3FVsgxs7bc+A= github.com/pulumiverse/pulumi-time/sdk v0.0.17/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= From db892907db21e7c2fa96ac696f6724f31312b5e3 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 5 Sep 2024 
15:12:57 +0200 Subject: [PATCH 027/128] [CWS] move `containers_running` telemetry to the system-probe (#29060) --- cmd/system-probe/modules/eventmonitor.go | 2 +- pkg/security/agent/agent.go | 6 -- pkg/security/agent/agent_nix.go | 7 --- pkg/security/agent/agent_windows.go | 1 - pkg/security/agent/status_provider_test.go | 1 - pkg/security/agent/telemetry_others.go | 4 -- pkg/security/module/cws.go | 22 ++++++-- pkg/security/module/opts.go | 4 +- .../containers_running_telemetry_linux.go} | 56 ++++++------------- .../containers_running_telemetry_others.go | 27 +++++++++ pkg/security/tests/module_tester_linux.go | 2 +- pkg/security/tests/module_tester_windows.go | 2 +- 12 files changed, 67 insertions(+), 67 deletions(-) rename pkg/security/{agent/telemetry_linux.go => telemetry/containers_running_telemetry_linux.go} (52%) create mode 100644 pkg/security/telemetry/containers_running_telemetry_others.go diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go index cec624ea3f746..d94555cddd939 100644 --- a/cmd/system-probe/modules/eventmonitor.go +++ b/cmd/system-probe/modules/eventmonitor.go @@ -48,7 +48,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen } if secconfig.RuntimeSecurity.IsRuntimeEnabled() { - cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, secmoduleOpts) + cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, deps.WMeta, secmoduleOpts) if err != nil { return nil, err } diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index 3d2fd1832d0b3..60d71694f9188 100644 --- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -37,7 +37,6 @@ type RuntimeSecurityAgent struct { connected *atomic.Bool eventReceived *atomic.Uint64 activityDumpReceived *atomic.Uint64 - telemetry *telemetry profContainersTelemetry *profContainersTelemetry endpoints *config.Endpoints cancel context.CancelFunc @@ -69,11 +68,6 @@ func (rsa *RuntimeSecurityAgent) Start(reporter common.RawReporter, endpoints *c go rsa.startActivityDumpStorageTelemetry(ctx) } - if rsa.telemetry != nil { - // Send Runtime Security Agent telemetry - go rsa.telemetry.run(ctx) - } - if rsa.profContainersTelemetry != nil { // Send Profiled Containers telemetry go rsa.profContainersTelemetry.run(ctx) diff --git a/pkg/security/agent/agent_nix.go b/pkg/security/agent/agent_nix.go index 5a051c9a125a1..bc444d6f049fe 100644 --- a/pkg/security/agent/agent_nix.go +++ b/pkg/security/agent/agent_nix.go @@ -24,12 +24,6 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin return nil, err } - // on windows do no telemetry - telemetry, err := newTelemetry(statsdClient, wmeta) - if err != nil { - return nil, errors.New("failed to initialize the telemetry reporter") - } - profContainersTelemetry, err := newProfContainersTelemetry(statsdClient, wmeta, opts.LogProfiledWorkloads) if err != nil { return nil, errors.New("failed to initialize the profiled containers telemetry reporter") @@ -44,7 +38,6 @@ func NewRuntimeSecurityAgent(statsdClient statsd.ClientInterface, hostname strin return &RuntimeSecurityAgent{ client: client, hostname: hostname, - telemetry: telemetry, profContainersTelemetry: profContainersTelemetry, storage: storage, running: atomic.NewBool(false), diff --git a/pkg/security/agent/agent_windows.go b/pkg/security/agent/agent_windows.go index 54bd6862155e2..3b1cad54f3e10 100644 --- a/pkg/security/agent/agent_windows.go +++ b/pkg/security/agent/agent_windows.go @@ 
-24,7 +24,6 @@ func NewRuntimeSecurityAgent(_ statsd.ClientInterface, hostname string, _ RSAOpt return &RuntimeSecurityAgent{ client: client, hostname: hostname, - telemetry: nil, storage: nil, running: atomic.NewBool(false), connected: atomic.NewBool(false), diff --git a/pkg/security/agent/status_provider_test.go b/pkg/security/agent/status_provider_test.go index 4c259193e5f67..af26830e9f3dd 100644 --- a/pkg/security/agent/status_provider_test.go +++ b/pkg/security/agent/status_provider_test.go @@ -18,7 +18,6 @@ func TestStatus(t *testing.T) { agent: &RuntimeSecurityAgent{ client: nil, hostname: "test", - telemetry: nil, storage: nil, running: atomic.NewBool(false), connected: atomic.NewBool(false), diff --git a/pkg/security/agent/telemetry_others.go b/pkg/security/agent/telemetry_others.go index d36f3e1aba54b..10647951c734b 100644 --- a/pkg/security/agent/telemetry_others.go +++ b/pkg/security/agent/telemetry_others.go @@ -10,10 +10,6 @@ package agent import "context" -type telemetry struct{} - -func (t *telemetry) run(_ context.Context) {} - type profContainersTelemetry struct{} func (t *profContainersTelemetry) registerProfiledContainer(_, _ string) {} diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index a5091b44ace95..4f2c298989e75 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -15,6 +15,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/eventmonitor" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -28,6 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/serializers" + "github.com/DataDog/datadog-agent/pkg/security/telemetry" ) // CWSConsumer represents the system-probe module for the runtime security agent @@ -49,17 +51,19 @@ type CWSConsumer struct { ruleEngine *rulesmodule.RuleEngine selfTester *selftests.SelfTester reloader ReloaderInterface + crtelemetry *telemetry.ContainersRunningTelemetry } // NewCWSConsumer initializes the module with options -func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, opts Opts) (*CWSConsumer, error) { - ctx, cancelFnc := context.WithCancel(context.Background()) +func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, wmeta workloadmeta.Component, opts Opts) (*CWSConsumer, error) { + crtelemetry, err := telemetry.NewContainersRunningTelemetry(cfg, evm.StatsdClient, wmeta) + if err != nil { + return nil, err + } - var ( - selfTester *selftests.SelfTester - err error - ) + ctx, cancelFnc := context.WithCancel(context.Background()) + var selfTester *selftests.SelfTester if cfg.SelfTestEnabled { selfTester, err = selftests.NewSelfTester(cfg, evm.Probe) if err != nil { @@ -82,6 +86,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC grpcServer: NewGRPCServer(family, address), selfTester: selfTester, reloader: NewReloader(), + crtelemetry: crtelemetry, } // set sender @@ -151,6 +156,11 @@ func (c *CWSConsumer) Start() error { c.wg.Add(1) go c.statsSender() + if c.crtelemetry != nil { + // Send containers running telemetry + go c.crtelemetry.Run(c.ctx) + } + seclog.Infof("runtime security started") // we can now wait for self test events diff --git a/pkg/security/module/opts.go 
b/pkg/security/module/opts.go index fd642eb438652..984f0c3872142 100644 --- a/pkg/security/module/opts.go +++ b/pkg/security/module/opts.go @@ -6,7 +6,9 @@ // Package module holds module related files package module -import "github.com/DataDog/datadog-agent/pkg/security/events" +import ( + "github.com/DataDog/datadog-agent/pkg/security/events" +) // Opts define module options type Opts struct { diff --git a/pkg/security/agent/telemetry_linux.go b/pkg/security/telemetry/containers_running_telemetry_linux.go similarity index 52% rename from pkg/security/agent/telemetry_linux.go rename to pkg/security/telemetry/containers_running_telemetry_linux.go index 71c5c41b864bd..3a325b7efa469 100644 --- a/pkg/security/agent/telemetry_linux.go +++ b/pkg/security/telemetry/containers_running_telemetry_linux.go @@ -3,48 +3,42 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package agent holds agent related files -package agent +package telemetry import ( "context" - "errors" "os" "time" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" - "github.com/DataDog/datadog-agent/pkg/security/proto/api" - sectelemetry "github.com/DataDog/datadog-agent/pkg/security/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-go/v5/statsd" ) -// telemetry reports environment information (e.g containers running) when the runtime security component is running -type telemetry struct { - containers *sectelemetry.ContainersTelemetry - runtimeSecurityClient *RuntimeSecurityClient +// ContainersRunningTelemetry reports environment information (e.g containers running) when the runtime security component is running +type ContainersRunningTelemetry struct { + cfg *config.RuntimeSecurityConfig + containers *ContainersTelemetry } -func newTelemetry(statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*telemetry, error) { - runtimeSecurityClient, err := NewRuntimeSecurityClient() +// NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance +func NewContainersRunningTelemetry(cfg *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*ContainersRunningTelemetry, error) { + telemetrySender := NewSimpleTelemetrySenderFromStatsd(statsdClient) + containersTelemetry, err := NewContainersTelemetry(telemetrySender, wmeta) if err != nil { return nil, err } - telemetrySender := sectelemetry.NewSimpleTelemetrySenderFromStatsd(statsdClient) - containersTelemetry, err := sectelemetry.NewContainersTelemetry(telemetrySender, wmeta) - if err != nil { - return nil, err - } - - return &telemetry{ - containers: containersTelemetry, - runtimeSecurityClient: runtimeSecurityClient, + return &ContainersRunningTelemetry{ + cfg: cfg, + containers: containersTelemetry, }, nil } -func (t *telemetry) run(ctx context.Context) { +// Run starts the telemetry collection +func (t *ContainersRunningTelemetry) Run(ctx context.Context) { log.Info("started collecting Runtime Security Agent telemetry") defer log.Info("stopping Runtime Security Agent telemetry") @@ -63,33 +57,19 @@ func (t *telemetry) run(ctx context.Context) { } } -func (t *telemetry) fetchConfig() (*api.SecurityConfigMessage, error) { - cfg, err := t.runtimeSecurityClient.GetConfig() - if err != nil { - return cfg, errors.New("couldn't fetch config from runtime security 
module") - } - return cfg, nil -} - -func (t *telemetry) reportContainers() error { - // retrieve the runtime security module config - cfg, err := t.fetchConfig() - if err != nil { - return err - } - +func (t *ContainersRunningTelemetry) reportContainers() error { var fargate bool if os.Getenv("ECS_FARGATE") == "true" || os.Getenv("DD_ECS_FARGATE") == "true" || os.Getenv("DD_EKS_FARGATE") == "true" { fargate = true } var metricName string - if cfg.RuntimeEnabled { + if t.cfg.RuntimeEnabled { metricName = metrics.MetricSecurityAgentRuntimeContainersRunning if fargate { metricName = metrics.MetricSecurityAgentFargateRuntimeContainersRunning } - } else if cfg.FIMEnabled { + } else if t.cfg.FIMEnabled { metricName = metrics.MetricSecurityAgentFIMContainersRunning if fargate { metricName = metrics.MetricSecurityAgentFargateFIMContainersRunning diff --git a/pkg/security/telemetry/containers_running_telemetry_others.go b/pkg/security/telemetry/containers_running_telemetry_others.go new file mode 100644 index 0000000000000..3bb9658228d9a --- /dev/null +++ b/pkg/security/telemetry/containers_running_telemetry_others.go @@ -0,0 +1,27 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !linux + +package telemetry + +import ( + "context" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-go/v5/statsd" +) + +// ContainersRunningTelemetry reports environment information (e.g containers running) when the runtime security component is running +type ContainersRunningTelemetry struct{} + +// NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance (not supported on non-linux platforms) +func NewContainersRunningTelemetry(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*ContainersRunningTelemetry, error) { + return nil, nil +} + +// Run starts the telemetry collection +func (t *ContainersRunningTelemetry) Run(_ context.Context) {} diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index 07a3376d9398f..da0946b338ce6 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -822,7 +822,7 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman if !opts.staticOpts.disableRuntimeSecurity { msgSender := newFakeMsgSender(testMod) - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, module.Opts{EventSender: testMod, MsgSender: msgSender}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod, MsgSender: msgSender}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index b2c6d20952a79..972437a8ca435 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -292,7 +292,7 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] var ruleSetloadedErr *multierror.Error if !opts.staticOpts.disableRuntimeSecurity { - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, 
module.Opts{EventSender: testMod}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } From c4e3a1565923302d6821e41893e9db17a9877d5a Mon Sep 17 00:00:00 2001 From: Jennifer Chen <32009013+jennchenn@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:49:09 -0400 Subject: [PATCH 028/128] [clusteragent/autoscaling] Check that autoscaling target is not cluster agent (#28723) --- .../autoscaling/workload/controller.go | 64 +++++++-- .../autoscaling/workload/controller_test.go | 121 ++++++++++++++++++ pkg/util/kubernetes/helpers.go | 48 +++++-- pkg/util/kubernetes/helpers_test.go | 56 ++++++++ 4 files changed, 262 insertions(+), 27 deletions(-) diff --git a/pkg/clusteragent/autoscaling/workload/controller.go b/pkg/clusteragent/autoscaling/workload/controller.go index fb874e4bd3483..27ae6f77070aa 100644 --- a/pkg/clusteragent/autoscaling/workload/controller.go +++ b/pkg/clusteragent/autoscaling/workload/controller.go @@ -26,7 +26,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload/model" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) const ( @@ -272,22 +275,16 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string // Reaching this point, we had an error in processing, clearing up global error podAutoscalerInternal.SetError(nil) - // Now that everything is synced, we can perform the actual processing - result, err := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal) - - // Update status based on latest state - statusErr := c.updatePodAutoscalerStatus(ctx, podAutoscalerInternal, podAutoscaler) - if statusErr != nil { - log.Errorf("Failed to update status for PodAutoscaler: %s/%s, err: %v", ns, name, statusErr) - - // We want to return the status error if none to count in the requeue retries. 
- if err == nil { - err = statusErr - } + // Validate autoscaler requirements + validationErr := c.validateAutoscaler(podAutoscaler) + if validationErr != nil { + podAutoscalerInternal.SetError(validationErr) + return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, validationErr, podAutoscalerInternal, podAutoscaler) } - c.store.UnlockSet(key, podAutoscalerInternal, c.ID) - return result, err + // Now that everything is synced, we can perform the actual processing + result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal) + return result, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, scalingErr, podAutoscalerInternal, podAutoscaler) } func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) { @@ -389,3 +386,42 @@ func (c *Controller) deletePodAutoscaler(ns, name string) error { } return nil } + +func (c *Controller) validateAutoscaler(podAutoscaler *datadoghq.DatadogPodAutoscaler) error { + // Check that targetRef is not set to the cluster agent + clusterAgentPodName, err := common.GetSelfPodName() + if err != nil { + return fmt.Errorf("Unable to get the cluster agent pod name: %w", err) + } + + var resourceName string + switch owner := podAutoscaler.Spec.TargetRef.Kind; owner { + case "Deployment": + resourceName = kubernetes.ParseDeploymentForPodName(clusterAgentPodName) + case "ReplicaSet": + resourceName = kubernetes.ParseReplicaSetForPodName(clusterAgentPodName) + } + + clusterAgentNs := common.GetMyNamespace() + + if podAutoscaler.Namespace == clusterAgentNs && podAutoscaler.Spec.TargetRef.Name == resourceName { + return fmt.Errorf("Autoscaling target cannot be set to the cluster agent") + } + return nil +} + +func (c *Controller) updateAutoscalerStatusAndUnlock(ctx context.Context, key, ns, name string, err error, podAutoscalerInternal model.PodAutoscalerInternal, podAutoscaler *datadoghq.DatadogPodAutoscaler) error { + // Update status based on latest state + statusErr := c.updatePodAutoscalerStatus(ctx, podAutoscalerInternal, podAutoscaler) + if statusErr != nil { + log.Errorf("Failed to update status for PodAutoscaler: %s/%s, err: %v", ns, name, statusErr) + + // We want to return the status error if none to count in the requeue retries. 
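+		// (i.e. when the scaling/validation error passed in is nil, surface the status
+		// update error instead, so the failed sync is still counted toward requeue retries)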
+ if err == nil { + err = statusErr + } + } + + c.store.UnlockSet(key, podAutoscalerInternal, c.ID) + return err +} diff --git a/pkg/clusteragent/autoscaling/workload/controller_test.go b/pkg/clusteragent/autoscaling/workload/controller_test.go index 3e9b8a2eace6e..be6233bf73825 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_test.go +++ b/pkg/clusteragent/autoscaling/workload/controller_test.go @@ -8,6 +8,8 @@ package workload import ( + "errors" + "fmt" "testing" "time" @@ -26,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload/model" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) type fixture struct { @@ -228,3 +231,121 @@ func TestLeaderCreateDeleteRemote(t *testing.T) { f.RunControllerSync(true, "default/dpa-0") assert.Len(t, f.store.GetAll(), 0) } + +func TestDatadogPodAutoscalerTargetingClusterAgentErrors(t *testing.T) { + tests := []struct { + name string + targetRef autoscalingv2.CrossVersionObjectReference + }{ + { + "target set to cluster agent deployment", + autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "datadog-agent-cluster-agent", + APIVersion: "apps/v1", + }, + }, + { + "target set to cluster agent replicaset", + autoscalingv2.CrossVersionObjectReference{ + Kind: "ReplicaSet", + Name: "datadog-agent-cluster-agent-7dbf798595", + APIVersion: "apps/v1", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testTime := time.Now() + f := newFixture(t, testTime) + + t.Setenv("DD_POD_NAME", "datadog-agent-cluster-agent-7dbf798595-tp9lg") + currentNs := common.GetMyNamespace() + id := fmt.Sprintf("%s/dpa-dca", currentNs) + + dpaSpec := datadoghq.DatadogPodAutoscalerSpec{ + TargetRef: tt.targetRef, + // Local owner means .Spec source of truth is K8S + Owner: datadoghq.DatadogPodAutoscalerLocalOwner, + } + + dpa, dpaTyped := newFakePodAutoscaler(currentNs, "dpa-dca", 1, dpaSpec, datadoghq.DatadogPodAutoscalerStatus{}) + f.InformerObjects = append(f.InformerObjects, dpa) + + expectedDPAError := &datadoghq.DatadogPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + Kind: "DatadogPodAutoscaler", + APIVersion: "datadoghq.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dpa-dca", + Namespace: currentNs, + Generation: 1, + UID: dpa.GetUID(), + }, + Spec: datadoghq.DatadogPodAutoscalerSpec{ + TargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "", + Name: "", + APIVersion: "", + }, + Owner: "", + }, + Status: datadoghq.DatadogPodAutoscalerStatus{ + Conditions: []datadoghq.DatadogPodAutoscalerCondition{ + { + Type: datadoghq.DatadogPodAutoscalerErrorCondition, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.NewTime(testTime), + Reason: "Autoscaling target cannot be set to the cluster agent", + }, + { + Type: datadoghq.DatadogPodAutoscalerActiveCondition, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.NewTime(testTime), + }, + { + Type: datadoghq.DatadogPodAutoscalerHorizontalAbleToRecommendCondition, + Status: corev1.ConditionUnknown, + LastTransitionTime: metav1.NewTime(testTime), + }, + { + Type: datadoghq.DatadogPodAutoscalerVerticalAbleToRecommendCondition, + Status: corev1.ConditionUnknown, + LastTransitionTime: metav1.NewTime(testTime), + }, + { + Type: datadoghq.DatadogPodAutoscalerHorizontalScalingLimitedCondition, + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.NewTime(testTime), + }, + { + Type: 
datadoghq.DatadogPodAutoscalerHorizontalAbleToScaleCondition, + Status: corev1.ConditionUnknown, + LastTransitionTime: metav1.NewTime(testTime), + }, + { + Type: datadoghq.DatadogPodAutoscalerVerticalAbleToApply, + Status: corev1.ConditionUnknown, + LastTransitionTime: metav1.NewTime(testTime), + }, + }, + }, + } + expectedUnstructuredError, err := autoscaling.ToUnstructured(expectedDPAError) + assert.NoError(t, err) + f.RunControllerSync(true, id) + + f.Objects = append(f.Objects, dpaTyped) + f.Actions = nil + + f.ExpectUpdateStatusAction(expectedUnstructuredError) + f.RunControllerSync(true, id) + assert.Len(t, f.store.GetAll(), 1) + pai, found := f.store.Get(id) + assert.Truef(t, found, "Expected to find DatadogPodAutoscaler in store") + assert.Equal(t, errors.New("Autoscaling target cannot be set to the cluster agent"), pai.Error()) + }) + } +} diff --git a/pkg/util/kubernetes/helpers.go b/pkg/util/kubernetes/helpers.go index 66a6ef5fbce07..142e62e562fea 100644 --- a/pkg/util/kubernetes/helpers.go +++ b/pkg/util/kubernetes/helpers.go @@ -20,23 +20,23 @@ const Digits = "1234567890" // ParseDeploymentForReplicaSet gets the deployment name from a replicaset, // or returns an empty string if no parent deployment is found. func ParseDeploymentForReplicaSet(name string) string { - lastDash := strings.LastIndexByte(name, '-') - if lastDash == -1 { - // No dash - return "" - } - suffix := name[lastDash+1:] - if len(suffix) < 3 { - // Suffix is variable length but we cutoff at 3+ characters - return "" - } + return removeKubernetesNameSuffix(name) +} - if !stringInRuneset(suffix, Digits) && !stringInRuneset(suffix, KubeAllowedEncodeStringAlphaNums) { - // Invalid suffix +// ParseDeploymentForPodName gets the deployment name from a pod name, +// or returns an empty string if no parent deployment is found. +func ParseDeploymentForPodName(name string) string { + replicaSet := removeKubernetesNameSuffix(name) + if replicaSet == "" { return "" } + return ParseDeploymentForReplicaSet(replicaSet) +} - return name[:lastDash] +// ParseReplicaSetForPodName gets the replica set name from a pod name, +// or returns an empty string if no parent replica set is found. +func ParseReplicaSetForPodName(name string) string { + return removeKubernetesNameSuffix(name) } // ParseCronJobForJob gets the cronjob name from a job, @@ -79,3 +79,25 @@ func stringInRuneset(name, subset string) bool { } return true } + +// removeKubernetesNameSuffix removes the suffix from a kubernetes name +// or returns an empty string if either the suffix or name are invalid. 
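+// For example, "frontend-56c89cfff7-tsdww" becomes "frontend-56c89cfff7" and
+// "frontend-2891696001" becomes "frontend", while "manually-created" yields ""
+// because "created" is not a valid generated suffix.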
+func removeKubernetesNameSuffix(name string) string { + lastDash := strings.LastIndexByte(name, '-') + if lastDash == -1 { + // No dash + return "" + } + suffix := name[lastDash+1:] + if len(suffix) < 3 { + // Suffix is variable length but we cutoff at 3+ characters + return "" + } + + if !stringInRuneset(suffix, Digits) && !stringInRuneset(suffix, KubeAllowedEncodeStringAlphaNums) { + // Invalid suffix + return "" + } + + return name[:lastDash] +} diff --git a/pkg/util/kubernetes/helpers_test.go b/pkg/util/kubernetes/helpers_test.go index b3bc300c953fd..899ba603a2c0e 100644 --- a/pkg/util/kubernetes/helpers_test.go +++ b/pkg/util/kubernetes/helpers_test.go @@ -40,6 +40,62 @@ func TestParseDeploymentForReplicaSet(t *testing.T) { } } +func TestParseDeploymentForPodName(t *testing.T) { + for in, out := range map[string]string{ + // Nominal 1.6 cases + "frontend-2891696001-51234": "frontend", + "front-end-2891696001-72346": "front-end", + + // Non-deployment 1.6 cases + "frontend2891696001-31-": "", + "-frontend2891696001-21": "", + "manually-created": "", + + // 1.8+ nominal cases + "frontend-56c89cfff7-tsdww": "frontend", + "frontend-56c-p2q": "frontend", + "frontend-56c89cff-qhxl8": "frontend", + "frontend-56c89cfff7c2-g9lmb": "frontend", + "front-end-768dd754b7-ptdcc": "front-end", + + // 1.8+ non-deployment cases + "frontend-56c89cff-bx": "", // too short + "frontend-56a89cfff7-a": "", // no vowels allowed + } { + t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + assert.Equal(t, out, ParseDeploymentForPodName(in)) + }) + } +} + +func TestParseReplicaSetForPodName(t *testing.T) { + for in, out := range map[string]string{ + // Nominal 1.6 cases + "frontend-2891696001-51234": "frontend-2891696001", + "front-end-2891696001-72346": "front-end-2891696001", + + // Non-replica-set 1.6 cases + "frontend2891696001-31-": "", + "-frontend2891696001-21": "", + "manually-created": "", + + // 1.8+ nominal cases + "frontend-56c89cfff7-tsdww": "frontend-56c89cfff7", + "frontend-56c-p2q": "frontend-56c", + "frontend-56c89cff-qhxl8": "frontend-56c89cff", + "frontend-56c89cfff7c2-g9lmb": "frontend-56c89cfff7c2", + "front-end-768dd754b7-ptdcc": "front-end-768dd754b7", + + // 1.8+ non-replica-set cases + "frontend-56c89cff-bx": "", // too short + "frontend-56a89cfff7-a": "", // no vowels allowed + } { + t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + assert.Equal(t, out, ParseReplicaSetForPodName(in)) + }) + } +} + func TestParseCronJobForJob(t *testing.T) { for in, out := range map[string]struct { string From 7786b7001a745788ff09a185fb17a43d9b3b4123 Mon Sep 17 00:00:00 2001 From: Zhengda Lu Date: Thu, 5 Sep 2024 09:53:30 -0400 Subject: [PATCH 029/128] [dbm] bump go-sqllexer to 0.0.14 (#29052) --- .../exporter/datadogexporter/go.mod | 2 +- .../exporter/datadogexporter/go.sum | 4 +-- .../otlp/components/statsprocessor/go.mod | 2 +- .../otlp/components/statsprocessor/go.sum | 4 +-- go.mod | 2 +- go.sum | 4 +-- pkg/obfuscate/go.mod | 2 +- pkg/obfuscate/go.sum | 4 +-- pkg/obfuscate/sql_test.go | 28 +++++++++++++++++++ pkg/trace/go.mod | 2 +- pkg/trace/go.sum | 4 +-- pkg/trace/stats/oteltest/go.mod | 2 +- pkg/trace/stats/oteltest/go.sum | 4 +-- ...p-go-sqllexer-0.0.14-26ba053cf04ac223.yaml | 11 ++++++++ test/otel/go.mod | 2 +- test/otel/go.sum | 4 +-- 16 files changed, 60 insertions(+), 21 deletions(-) create mode 100644 releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod 
b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod index 8857c8dcff0c4..5f48e6da16c46 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod @@ -185,7 +185,7 @@ require ( github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.13 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum index e60a093490cb1..cdf2e4f5f136d 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum @@ -8,8 +8,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod index 50b1bae9469db..b21dae11b5ade 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.mod +++ b/comp/otelcol/otlp/components/statsprocessor/go.mod @@ -38,7 +38,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/go-sqllexer v0.0.13 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum index dcf72c47c645c..a928af42bb225 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.sum +++ b/comp/otelcol/otlp/components/statsprocessor/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.13 
h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= diff --git a/go.mod b/go.mod index 84cece8bc97a7..b1f17ce865430 100644 --- a/go.mod +++ b/go.mod @@ -703,7 +703,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/DataDog/go-libddwaf/v3 v3.3.0 - github.com/DataDog/go-sqllexer v0.0.13 + github.com/DataDog/go-sqllexer v0.0.14 github.com/Datadog/dublin-traceroute v0.0.1 github.com/aquasecurity/trivy v0.49.2-0.20240227072422-e1ea02c7b80d github.com/aws/aws-sdk-go-v2/service/kms v1.34.1 diff --git a/go.sum b/go.sum index a310d674a2d1f..a131d6f6b7cd3 100644 --- a/go.sum +++ b/go.sum @@ -712,8 +712,8 @@ github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302 github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index 80da0db8971b4..e25602108c4fb 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/go-sqllexer v0.0.13 + github.com/DataDog/go-sqllexer v0.0.14 github.com/outcaste-io/ristretto v0.2.1 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.10.0 diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index b0abab16649ad..e6d91aba3fc14 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/Microsoft/go-winio v0.5.0/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index 9b0f5969e55f5..f6b7005c46342 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -2403,6 +2403,20 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { Procedures: []string{}, }, }, + { + name: "select with cte", + query: "WITH users AS (SELECT * FROM people) SELECT * FROM users where id = 1", + expected: "WITH users AS ( SELECT * FROM people ) SELECT * FROM users where id = ?", + metadata: SQLMetadata{ + Size: 12, + TablesCSV: "people", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, } for _, tt := range tests { @@ -2589,6 +2603,20 @@ func TestSQLLexerNormalization(t *testing.T) { Procedures: []string{}, }, }, + { + name: "select with cte", + query: "WITH users AS (SELECT * FROM people) SELECT * FROM users", + expected: "WITH users AS ( SELECT * FROM people ) SELECT * FROM users", + metadata: SQLMetadata{ + Size: 12, + TablesCSV: "people", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, } for _, tt := range tests { diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 512debc4740e6..a16cdf5d8f6a9 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -53,7 +53,7 @@ require ( ) require ( - github.com/DataDog/go-sqllexer v0.0.13 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/zstd v1.5.5 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 9ae6fe28770bc..0acbab963039c 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 h1:10TPqpTlIkmDPFWVIEZ4ZX3rWrCrx3rEoeoAooZr6LM= diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod index 7ba2f55d9b746..e3466f3cda4f7 100644 --- a/pkg/trace/stats/oteltest/go.mod +++ b/pkg/trace/stats/oteltest/go.mod @@ -26,7 +26,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/go-sqllexer v0.0.13 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // 
indirect github.com/DataDog/sketches-go v1.4.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum index dcf72c47c645c..a928af42bb225 100644 --- a/pkg/trace/stats/oteltest/go.sum +++ b/pkg/trace/stats/oteltest/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= diff --git a/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml b/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml new file mode 100644 index 0000000000000..75e66514f275a --- /dev/null +++ b/releasenotes/notes/bump-go-sqllexer-0.0.14-26ba053cf04ac223.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + [DBM] Bump go-sqllexer to 0.0.14 to skip collecting CTE tables as SQL metadata. 
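A minimal sketch of the behavior described in the release note above, using this repository's pkg/obfuscate package (NewObfuscator and ObfuscateSQLString; the "obfuscate_and_normalize" mode string is an assumption about how the go-sqllexer based path is selected): with go-sqllexer 0.0.14 the CTE name ("users") is no longer reported in the collected table metadata, only the underlying table ("people") is, matching the new cases in sql_test.go.

    package main

    import (
        "fmt"

        "github.com/DataDog/datadog-agent/pkg/obfuscate"
    )

    func main() {
        // Assumption: this mode routes SQL queries through the go-sqllexer obfuscator/normalizer.
        o := obfuscate.NewObfuscator(obfuscate.Config{
            SQL: obfuscate.SQLConfig{ObfuscationMode: "obfuscate_and_normalize"},
        })
        oq, err := o.ObfuscateSQLString("WITH users AS (SELECT * FROM people) SELECT * FROM users where id = 1")
        if err != nil {
            panic(err)
        }
        fmt.Println(oq.Query)              // WITH users AS ( SELECT * FROM people ) SELECT * FROM users where id = ?
        fmt.Println(oq.Metadata.TablesCSV) // people (the CTE name "users" is skipped)
    }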
diff --git a/test/otel/go.mod b/test/otel/go.mod index 1eaab0c1aa48e..1a1e56a250a8a 100644 --- a/test/otel/go.mod +++ b/test/otel/go.mod @@ -171,7 +171,7 @@ require ( github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.13 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 // indirect diff --git a/test/otel/go.sum b/test/otel/go.sum index e60a093490cb1..cdf2e4f5f136d 100644 --- a/test/otel/go.sum +++ b/test/otel/go.sum @@ -8,8 +8,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.13 h1:9mKfe+3s73GI/7dWBxi2Ds7+xZynJqMKK9cIUBrutak= -github.com/DataDog/go-sqllexer v0.0.13/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= From 9185aee97739b5ec32023db0115d130ad92fa5dd Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Thu, 5 Sep 2024 16:09:40 +0200 Subject: [PATCH 030/128] discovery: report CPU usage (#29003) --- .../corechecks/servicediscovery/events.go | 2 + .../servicediscovery/events_test.go | 4 + .../corechecks/servicediscovery/impl_linux.go | 2 + .../servicediscovery/impl_linux_test.go | 8 ++ .../servicediscovery/model/model.go | 1 + .../servicediscovery/module/impl_linux.go | 27 ++++++- .../module/impl_linux_test.go | 17 ++++ .../servicediscovery/module/stat.go | 79 +++++++++++++++++++ .../servicediscovery/servicediscovery.go | 4 +- .../aggregator/servicediscoveryAggregator.go | 27 ++++--- 10 files changed, 151 insertions(+), 20 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index ebf17b1b8a994..aa02df577a7e9 100644 --- a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -42,6 +42,7 @@ type eventPayload struct { PID int `json:"pid"` CommandLine []string `json:"command_line"` RSSMemory uint64 `json:"rss_memory"` + CPUCores float64 `json:"cpu_cores"` } type event struct { @@ -87,6 +88,7 @@ func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { PID: svc.service.PID, CommandLine: svc.service.CommandLine, RSSMemory: svc.service.RSS, + CPUCores: svc.service.CPUCores, }, } } diff --git a/pkg/collector/corechecks/servicediscovery/events_test.go b/pkg/collector/corechecks/servicediscovery/events_test.go index 
93ffb9ffe1ecb..0675a7dedd4c9 100644 --- a/pkg/collector/corechecks/servicediscovery/events_test.go +++ b/pkg/collector/corechecks/servicediscovery/events_test.go @@ -64,6 +64,7 @@ func Test_telemetrySender(t *testing.T) { GeneratedName: "generated-name", DDService: "dd-service", DDServiceInjected: true, + CPUCores: 1.5, }, meta: ServiceMetadata{ Name: "test-service", @@ -99,6 +100,7 @@ func Test_telemetrySender(t *testing.T) { PID: 99, CommandLine: []string{"test-service", "--args"}, RSSMemory: 500 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -121,6 +123,7 @@ func Test_telemetrySender(t *testing.T) { PID: 99, CommandLine: []string{"test-service", "--args"}, RSSMemory: 500 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -143,6 +146,7 @@ func Test_telemetrySender(t *testing.T) { PID: 99, CommandLine: []string{"test-service", "--args"}, RSSMemory: 500 * 1024 * 1024, + CPUCores: 1.5, }, }, } diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index eb2f533ab901c..27ef6aa36cc66 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -78,6 +78,7 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { if service, ok := serviceMap[pid]; ok { svc.LastHeartbeat = now svc.service.RSS = service.RSS + svc.service.CPUCores = service.CPUCores li.aliveServices[pid] = svc events.start = append(events.start, *svc) } @@ -112,6 +113,7 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { } else if now.Sub(svc.LastHeartbeat).Truncate(time.Minute) >= heartbeatTime { svc.LastHeartbeat = now svc.service.RSS = service.RSS + svc.service.CPUCores = service.CPUCores events.heartbeat = append(events.heartbeat, *svc) } } diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go index a42d2dbac7414..b8ea7760d724d 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go @@ -76,6 +76,7 @@ var ( Ports: []uint16{8080}, APMInstrumentation: string(apm.None), RSS: 100 * 1024 * 1024, + CPUCores: 1.5, CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, } @@ -87,6 +88,7 @@ var ( Ports: []uint16{8080}, APMInstrumentation: string(apm.None), RSS: 200 * 1024 * 1024, + CPUCores: 1.5, CommandLine: []string{"test-service-1"}, StartTimeSecs: procLaunchedSeconds, } @@ -229,6 +231,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", RSSMemory: 100 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -250,6 +253,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", RSSMemory: 200 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -271,6 +275,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", RSSMemory: 200 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -382,6 +387,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", RSSMemory: 100 * 1024 * 1024, + CPUCores: 1.5, }, }, { @@ -437,6 +443,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", RSSMemory: 100 * 1024 * 1024, + CPUCores: 1.5, }, }, }, @@ -493,6 +500,7 @@ func Test_linuxImpl(t *testing.T) { CommandLine: []string{"test-service-1"}, APMInstrumentation: "none", 
RSSMemory: 100 * 1024 * 1024, + CPUCores: 1.5, }, }, { diff --git a/pkg/collector/corechecks/servicediscovery/model/model.go b/pkg/collector/corechecks/servicediscovery/model/model.go index 79fd656bbfb58..93a3585167214 100644 --- a/pkg/collector/corechecks/servicediscovery/model/model.go +++ b/pkg/collector/corechecks/servicediscovery/model/model.go @@ -19,6 +19,7 @@ type Service struct { RSS uint64 `json:"rss"` CommandLine []string `json:"cmdline"` StartTimeSecs uint64 `json:"start_time"` + CPUCores float64 `json:"cpu_cores"` } // ServicesResponse is the response for the system-probe /discovery/services endpoint. diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 4331835f96d91..7d7237c38484b 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -50,6 +50,7 @@ type serviceInfo struct { apmInstrumentation apm.Instrumentation cmdLine []string startTimeSecs uint64 + cpuTime uint64 } // discovery is an implementation of the Module interface for the discovery module. @@ -63,6 +64,10 @@ type discovery struct { // scrubber is used to remove potentially sensitive data from the command line scrubber *procutil.DataScrubber + + // lastGlobalCPUTime stores the total cpu time of the system from the last time + // the endpoint was called. + lastGlobalCPUTime uint64 } // NewDiscoveryModule creates a new discovery system probe module. @@ -288,8 +293,9 @@ func getNsInfo(pid int) (*namespaceInfo, error) { // parsingContext holds temporary context not preserved between invocations of // the endpoint. type parsingContext struct { - procRoot string - netNsInfo map[uint32]*namespaceInfo + procRoot string + netNsInfo map[uint32]*namespaceInfo + globalCPUTime uint64 } // getServiceInfo gets the service information for a process using the @@ -454,6 +460,11 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service name = info.generatedName } + cpu, err := updateCPUCoresStats(proc, info, s.lastGlobalCPUTime, context.globalCPUTime) + if err != nil { + return nil + } + return &model.Service{ PID: int(pid), Name: name, @@ -466,6 +477,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service RSS: rss, CommandLine: info.cmdLine, StartTimeSecs: info.startTimeSecs, + CPUCores: cpu, } } @@ -492,9 +504,15 @@ func (s *discovery) getServices() (*[]model.Service, error) { return nil, err } + globalCPUTime, err := getGlobalCPUTime() + if err != nil { + return nil, err + } + context := parsingContext{ - procRoot: procRoot, - netNsInfo: make(map[uint32]*namespaceInfo), + procRoot: procRoot, + netNsInfo: make(map[uint32]*namespaceInfo), + globalCPUTime: globalCPUTime, } var services []model.Service @@ -512,6 +530,7 @@ func (s *discovery) getServices() (*[]model.Service, error) { } s.cleanCache(alivePids) + s.lastGlobalCPUTime = context.globalCPUTime return &services, nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index f9f0a58184710..7997723188262 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -484,6 +484,7 @@ func TestAPMInstrumentationProvided(t *testing.T) { assert.Equal(collect, string(test.language), portMap[pid].Language) assert.Equal(collect, string(apm.Provided), 
portMap[pid].APMInstrumentation) assertStat(t, portMap[pid]) + assertCPU(t, url, pid) }, 30*time.Second, 100*time.Millisecond) }) } @@ -512,6 +513,21 @@ func assertStat(t assert.TestingT, svc model.Service) { assert.Equal(t, uint64(createTimeMs/1000), svc.StartTimeSecs) } +func assertCPU(t *testing.T, url string, pid int) { + proc, err := process.NewProcess(int32(pid)) + require.NoError(t, err, "could not create gopsutil process handle") + + // Compare CPU usage measurement over an interval. + _ = getServicesMap(t, url) + referenceValue, err := proc.Percent(1 * time.Second) + require.NoError(t, err, "could not get gopsutil cpu usage value") + + // Calling getServicesMap a second time us the CPU usage percentage since the last call, which should be close to gopsutil value. + portMap := getServicesMap(t, url) + assert.Contains(t, portMap, pid) + assert.InDelta(t, referenceValue, portMap[pid].CPUCores, 0.10) +} + func TestCommandLineSanitization(t *testing.T) { serverDir := buildFakeServer(t) url := setupDiscoveryModule(t) @@ -590,6 +606,7 @@ func TestNodeDocker(t *testing.T) { assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name) assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) assertStat(collect, svcMap[pid]) + assertCPU(t, url, pid) }, 30*time.Second, 100*time.Millisecond) } diff --git a/pkg/collector/corechecks/servicediscovery/module/stat.go b/pkg/collector/corechecks/servicediscovery/module/stat.go index 4e12e840d741c..25077dc6d75a9 100644 --- a/pkg/collector/corechecks/servicediscovery/module/stat.go +++ b/pkg/collector/corechecks/servicediscovery/module/stat.go @@ -8,8 +8,11 @@ package module import ( + "bufio" + "bytes" "errors" "os" + "runtime" "strconv" "strings" @@ -47,3 +50,79 @@ func getRSS(proc *process.Process) (uint64, error) { return rssPages * pageSize, nil } + +func getGlobalCPUTime() (uint64, error) { + globalStatPath := kernel.HostProc("stat") + + // This file is very small so just read it fully. + file, err := os.Open(globalStatPath) + if err != nil { + return 0, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + // Try to read the first line; it contains all the info we need. + if !scanner.Scan() { + return 0, scanner.Err() + } + + // See proc(5) for a description of the format of statm and the fields. + fields := strings.Fields(scanner.Text()) + if fields[0] != "cpu" { + return 0, errors.New("invalid /proc/stat file") + } + + var totalTime uint64 + for _, field := range fields[1:] { + val, err := strconv.ParseUint(field, 10, 64) + if err != nil { + return 0, err + } + totalTime += val + } + + return totalTime, nil +} + +func updateCPUCoresStats(proc *process.Process, info *serviceInfo, lastGlobalCPUTime, currentGlobalCPUTime uint64) (float64, error) { + statPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "stat") + + // This file is very small so just read it fully. + content, err := os.ReadFile(statPath) + if err != nil { + return 0, err + } + + startIndex := bytes.LastIndexByte(content, byte(')')) + if startIndex == -1 || startIndex+1 >= len(content) { + return 0, errors.New("invalid stat format") + } + + // See proc(5) for a description of the format of statm and the fields. + fields := strings.Fields(string(content[startIndex+1:])) + if len(fields) < 50 { + return 0, errors.New("invalid stat format") + } + + // Parse fields number 14 and 15, resp. User and System CPU time. + // See proc_pid_stat(5), for details. + // Here we address 11 & 12 since we skipped the first two fields. 
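+	// Both counters are cumulative clock ticks (USER_HZ), the same unit as the
+	// /proc/stat totals read above, so the tick length cancels out in the delta
+	// ratio computed below before it is scaled by runtime.NumCPU().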
+ usrTime, err := strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return 0, err + } + + sysTime, err := strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return 0, err + } + + processTimeDelta := float64(usrTime + sysTime - info.cpuTime) + globalTimeDelta := float64(currentGlobalCPUTime - lastGlobalCPUTime) + cpuUsage := processTimeDelta / globalTimeDelta * float64(runtime.NumCPU()) + + info.cpuTime = usrTime + sysTime + + return cpuUsage, nil +} diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index 3e8bf7eb7dab6..61e97ca5de80c 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -58,9 +58,7 @@ type osImpl interface { DiscoverServices() (*discoveredServices, error) } -var ( - newOSImpl func(ignoreCfg map[string]bool) (osImpl, error) -) +var newOSImpl func(ignoreCfg map[string]bool) (osImpl, error) type config struct { IgnoreProcesses []string `yaml:"ignore_processes"` diff --git a/test/fakeintake/aggregator/servicediscoveryAggregator.go b/test/fakeintake/aggregator/servicediscoveryAggregator.go index 854f890ea62db..0582205b34b41 100644 --- a/test/fakeintake/aggregator/servicediscoveryAggregator.go +++ b/test/fakeintake/aggregator/servicediscoveryAggregator.go @@ -20,19 +20,20 @@ type ServiceDiscoveryPayload struct { RequestType string `json:"request_type"` APIVersion string `json:"api_version"` Payload struct { - NamingSchemaVersion string `json:"naming_schema_version"` - ServiceName string `json:"service_name"` - GeneratedServiceName string `json:"generated_service_name"` - DDService string `json:"dd_service,omitempty"` - HostName string `json:"host_name"` - Env string `json:"env"` - ServiceLanguage string `json:"service_language"` - ServiceType string `json:"service_type"` - StartTime int64 `json:"start_time"` - LastSeen int64 `json:"last_seen"` - APMInstrumentation string `json:"apm_instrumentation"` - ServiceNameSource string `json:"service_name_source,omitempty"` - RSSMemory uint64 `json:"rss_memory"` + NamingSchemaVersion string `json:"naming_schema_version"` + ServiceName string `json:"service_name"` + GeneratedServiceName string `json:"generated_service_name"` + DDService string `json:"dd_service,omitempty"` + HostName string `json:"host_name"` + Env string `json:"env"` + ServiceLanguage string `json:"service_language"` + ServiceType string `json:"service_type"` + StartTime int64 `json:"start_time"` + LastSeen int64 `json:"last_seen"` + APMInstrumentation string `json:"apm_instrumentation"` + ServiceNameSource string `json:"service_name_source,omitempty"` + RSSMemory uint64 `json:"rss_memory"` + CPUCores float64 `json:"cpu_cores"` } `json:"payload"` } From 7bd0387570fbc17f034f1076c4a4bd53e99704b8 Mon Sep 17 00:00:00 2001 From: Branden Clark Date: Thu, 5 Sep 2024 11:15:54 -0400 Subject: [PATCH 031/128] add startstop test with FIM disabled (#29055) --- .gitlab/e2e/e2e.yml | 14 +++++++++++++ .../fixtures/system-probe-nofim.yaml | 8 +++++++ .../windows/service-test/startstop_test.go | 21 ++++++++++++++++--- 3 files changed, 40 insertions(+), 3 deletions(-) create mode 100644 test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 257e8d255a35b..efbed3a2b7422 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -158,6 +158,20 @@ new-e2e-windows-service-test: TEAM: windows-agent EXTRA_PARAMS: --run 
TestServiceBehavior +# Temporary job for hunting a crash +new-e2e-windows-service-test-nofim: + extends: .new_e2e_template + needs: + - !reference [.needs_new_e2e_template] + - deploy_windows_testing-a7 + rules: + - !reference [.on_windows_service_or_e2e_changes] + - !reference [.manual] + variables: + TARGETS: ./tests/windows/service-test + TEAM: windows-agent + EXTRA_PARAMS: --run TestNoFIMServiceBehavior + new-e2e-language-detection: extends: .new_e2e_template_needs_deb_x64 rules: diff --git a/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml new file mode 100644 index 0000000000000..bbfbac7b00d97 --- /dev/null +++ b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml @@ -0,0 +1,8 @@ +# enable NPM +network_config: + enabled: true + +# enable security agent +runtime_security_config: + enabled: true + fim_enabled: false diff --git a/test/new-e2e/tests/windows/service-test/startstop_test.go b/test/new-e2e/tests/windows/service-test/startstop_test.go index 14bd97dbabfbd..8d7a8f6dacb87 100644 --- a/test/new-e2e/tests/windows/service-test/startstop_test.go +++ b/test/new-e2e/tests/windows/service-test/startstop_test.go @@ -36,13 +36,28 @@ var agentConfig string //go:embed fixtures/system-probe.yaml var systemProbeConfig string +//go:embed fixtures/system-probe-nofim.yaml +var systemProbeNoFIMConfig string + //go:embed fixtures/security-agent.yaml var securityAgentConfig string +// TestServiceBehaviorAgentCommandNoFIM tests the service behavior when controlled by Agent commands +func TestNoFIMServiceBehaviorAgentCommand(t *testing.T) { + s := &agentServiceCommandSuite{} + run(t, s, systemProbeNoFIMConfig) +} + +// TestServiceBehaviorPowerShellNoFIM tests the service behavior when controlled by PowerShell commands +func TestNoFIMServiceBehaviorPowerShell(t *testing.T) { + s := &powerShellServiceCommandSuite{} + run(t, s, systemProbeNoFIMConfig) +} + // TestServiceBehaviorAgentCommand tests the service behavior when controlled by Agent commands func TestServiceBehaviorAgentCommand(t *testing.T) { s := &agentServiceCommandSuite{} - run(t, s) + run(t, s, systemProbeConfig) } type agentServiceCommandSuite struct { @@ -78,7 +93,7 @@ func (s *agentServiceCommandSuite) SetupSuite() { // TestServiceBehaviorAgentCommand tests the service behavior when controlled by PowerShell commands func TestServiceBehaviorPowerShell(t *testing.T) { s := &powerShellServiceCommandSuite{} - run(t, s) + run(t, s, systemProbeConfig) } type powerShellServiceCommandSuite struct { @@ -204,7 +219,7 @@ func (s *powerShellServiceCommandSuite) TestHardExitEventLogEntry() { }, 1*time.Minute, 1*time.Second, "should have hard exit messages in the event log") } -func run[Env any](t *testing.T, s e2e.Suite[Env]) { +func run[Env any](t *testing.T, s e2e.Suite[Env], systemProbeConfig string) { opts := []e2e.SuiteOption{e2e.WithProvisioner(awsHostWindows.ProvisionerNoFakeIntake( awsHostWindows.WithAgentOptions( agentparams.WithAgentConfig(agentConfig), From 6b60c2c3dc69edb1236fa882f4b61808895104ff Mon Sep 17 00:00:00 2001 From: Branden Clark Date: Thu, 5 Sep 2024 11:16:26 -0400 Subject: [PATCH 032/128] use distinct MSI log files (#29068) --- .../tests/installer/windows/base_suite.go | 3 +- .../installer/windows/datadog_installer.go | 43 +++++++++++++------ .../suites/installer-package/base_suite.go | 4 +- .../suites/installer-package/install_test.go | 8 +++- .../suites/installer-package/rollback_test.go | 10 ++++- 
.../suites/installer-package/upgrade_test.go | 9 +++- 6 files changed, 55 insertions(+), 22 deletions(-) diff --git a/test/new-e2e/tests/installer/windows/base_suite.go b/test/new-e2e/tests/installer/windows/base_suite.go index 3d9a9307c29c6..2daac352d8e14 100644 --- a/test/new-e2e/tests/installer/windows/base_suite.go +++ b/test/new-e2e/tests/installer/windows/base_suite.go @@ -6,7 +6,6 @@ package installer import ( - "fmt" agentVersion "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" @@ -113,7 +112,7 @@ func (s *BaseInstallerSuite) BeforeTest(suiteName, testName string) { outputDir, err := runner.GetTestOutputDir(runner.GetProfile(), s.T()) s.Require().NoError(err, "should get output dir") s.T().Logf("Output dir: %s", outputDir) - s.installer = NewDatadogInstaller(s.Env(), fmt.Sprintf("%s/install.log", outputDir)) + s.installer = NewDatadogInstaller(s.Env(), outputDir) } // Require instantiates a suiteAssertions for the current suite. diff --git a/test/new-e2e/tests/installer/windows/datadog_installer.go b/test/new-e2e/tests/installer/windows/datadog_installer.go index e246a29a5bbd1..28e7e186d20a3 100644 --- a/test/new-e2e/tests/installer/windows/datadog_installer.go +++ b/test/new-e2e/tests/installer/windows/datadog_installer.go @@ -49,16 +49,20 @@ var ( type DatadogInstaller struct { binaryPath string env *environments.WindowsHost - logPath string + outputDir string } // NewDatadogInstaller instantiates a new instance of the Datadog Installer running // on a remote Windows host. -func NewDatadogInstaller(env *environments.WindowsHost, logPath string) *DatadogInstaller { +func NewDatadogInstaller(env *environments.WindowsHost, outputDir string) *DatadogInstaller { + if outputDir == "" { + outputDir = os.TempDir() + } + return &DatadogInstaller{ binaryPath: path.Join(Path, BinaryName), env: env, - logPath: logPath, + outputDir: outputDir, } } @@ -130,8 +134,9 @@ func (d *DatadogInstaller) RemoveExperiment(packageName string) (string, error) // Params contains the optional parameters for the Datadog Installer Install command type Params struct { - installerURL string - msiArgs []string + installerURL string + msiArgs []string + msiLogFilename string } // Option is an optional function parameter type for the Datadog Installer Install command @@ -153,6 +158,14 @@ func WithMSIArg(arg string) Option { } } +// WithMSILogFile sets the filename for the MSI log file, to be stored in the output directory. +func WithMSILogFile(filename string) Option { + return func(params *Params) error { + params.msiLogFilename = filename + return nil + } +} + // WithInstallerURLFromInstallersJSON uses a specific URL for the Datadog Installer from an installers_v2.json // file. // bucket: The S3 bucket to look for the installers_v2.json file, i.e. "dd-agent-mstesting" @@ -176,7 +189,9 @@ func WithInstallerURLFromInstallersJSON(bucket, channel, version string) Option // Install will attempt to install the Datadog Installer on the remote host. // By default, it will use the installer from the current pipeline. 
func (d *DatadogInstaller) Install(opts ...Option) error { - params := Params{} + params := Params{ + msiLogFilename: "install.log", + } err := optional.ApplyOptions(¶ms, opts) if err != nil { return nil @@ -201,9 +216,9 @@ func (d *DatadogInstaller) Install(opts ...Option) error { params.installerURL = artifactURL msiPath = params.installerURL } - logPath := d.logPath - if logPath == "" { - logPath = filepath.Join(os.TempDir(), "install.log") + logPath := filepath.Join(d.outputDir, params.msiLogFilename) + if _, err := os.Stat(logPath); err == nil { + return fmt.Errorf("log file %s already exists", logPath) } msiArgs := "" if params.msiArgs != nil { @@ -214,7 +229,9 @@ func (d *DatadogInstaller) Install(opts ...Option) error { // Uninstall will attempt to uninstall the Datadog Installer on the remote host. func (d *DatadogInstaller) Uninstall(opts ...Option) error { - params := Params{} + params := Params{ + msiLogFilename: "uninstall.log", + } err := optional.ApplyOptions(¶ms, opts) if err != nil { return nil @@ -225,9 +242,9 @@ func (d *DatadogInstaller) Uninstall(opts ...Option) error { return err } - logPath := d.logPath - if logPath == "" { - logPath = filepath.Join(os.TempDir(), "uninstall.log") + logPath := filepath.Join(d.outputDir, params.msiLogFilename) + if _, err := os.Stat(logPath); err == nil { + return fmt.Errorf("log file %s already exists", logPath) } msiArgs := "" if params.msiArgs != nil { diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go index 240fcc1035f51..de9df45e634d8 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go @@ -24,7 +24,9 @@ func (s *baseInstallerSuite) freshInstall() { // Arrange // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("fresh-install.log"), + )) // Assert s.requireInstalled() diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go index 25f3df70dca0b..c4109055ae218 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go @@ -67,7 +67,9 @@ func (s *testInstallerSuite) installWithExistingConfigFile() { // Arrange // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("with-config-install.log"), + )) // Assert s.requireInstalled() @@ -82,7 +84,9 @@ func (s *testInstallerSuite) repair() { s.Require().NoError(s.Env().RemoteHost.Remove(installerwindows.BinaryPath)) // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("repair.log"), + )) // Assert s.requireInstalled() diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go index 9b2dadb31de29..b211dce84ead1 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go @@ -33,7 +33,10 @@ func (s *testInstallerRollbackSuite) installRollback() { // Arrange // Act - msiErr := 
s.Installer().Install(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"))
+	msiErr := s.Installer().Install(
+		installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"),
+		installerwindows.WithMSILogFile("install-rollback.log"),
+	)
 	s.Require().Error(msiErr)
 
 	// Assert
@@ -45,7 +48,10 @@ func (s *testInstallerRollbackSuite) uninstallRollback() {
 	// Arrange
 
 	// Act
-	msiErr := s.Installer().Uninstall(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"))
+	msiErr := s.Installer().Uninstall(
+		installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"),
+		installerwindows.WithMSILogFile("uninstall-rollback.log"),
+	)
 	s.Require().Error(msiErr)
 
 	// Assert
diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
index e9925be832a49..d4db167b2e2c6 100644
--- a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
+++ b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go
@@ -27,7 +30,10 @@ func TestInstallerUpgrades(t *testing.T) {
 // TestUpgrades tests upgrading the stable version of the Datadog installer to the latest from the pipeline.
 func (s *testInstallerUpgradesSuite) TestUpgrades() {
 	// Arrange
-	s.Require().NoError(s.Installer().Install(installerwindows.WithInstallerURLFromInstallersJSON(pipeline.AgentS3BucketTesting, pipeline.StableChannel, s.StableInstallerVersion().PackageVersion())))
+	s.Require().NoError(s.Installer().Install(
+		installerwindows.WithInstallerURLFromInstallersJSON(pipeline.AgentS3BucketTesting, pipeline.StableChannel, s.StableInstallerVersion().PackageVersion()),
+		installerwindows.WithMSILogFile("install.log"),
+	))
 	// sanity check: make sure we did indeed install the stable version
 	s.Require().Host(s.Env().RemoteHost).
 		HasBinary(installerwindows.BinaryPath).
@@ -36,7 +39,9 @@ func (s *testInstallerUpgradesSuite) TestUpgrades() {
 	// Act
 	// Install "latest" from the pipeline
-	s.Require().NoError(s.Installer().Install())
+	s.Require().NoError(s.Installer().Install(
+		installerwindows.WithMSILogFile("upgrade.log"),
+	))
 
 	// Assert
 	s.Require().Host(s.Env().RemoteHost).
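Patch 032 above replaces the single hard-coded install.log with a per-scenario MSI log: the suite passes its output directory to NewDatadogInstaller and each test names its log through the new WithMSILogFile option. A minimal, self-contained sketch of how that functional-options pattern composes; Params and Option mirror the shapes in datadog_installer.go, while apply and the main caller below are illustrative stand-ins for optional.ApplyOptions and the test suites, not code taken from the patch:

package main

import "fmt"

// Params collects the knobs an MSI run can override.
type Params struct {
	msiArgs        []string
	msiLogFilename string
}

// Option mutates Params; options are applied in order over the defaults.
type Option func(*Params) error

// WithMSIArg appends one extra msiexec argument.
func WithMSIArg(arg string) Option {
	return func(p *Params) error {
		p.msiArgs = append(p.msiArgs, arg)
		return nil
	}
}

// WithMSILogFile names the MSI log file to create in the suite's output directory.
func WithMSILogFile(name string) Option {
	return func(p *Params) error {
		p.msiLogFilename = name
		return nil
	}
}

// apply folds the options into a copy of the defaults, stopping at the first error.
func apply(defaults Params, opts ...Option) (Params, error) {
	for _, o := range opts {
		if err := o(&defaults); err != nil {
			return defaults, err
		}
	}
	return defaults, nil
}

func main() {
	// A rollback scenario keeps its own log name, so repeated MSI runs inside one
	// test suite no longer overwrite a single install.log.
	p, err := apply(
		Params{msiLogFilename: "install.log"},
		WithMSIArg("WIXFAILWHENDEFERRED=1"),
		WithMSILogFile("install-rollback.log"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.msiLogFilename, p.msiArgs)
}

Defaulting msiLogFilename per entry point (install.log, uninstall.log) and overriding it per test mirrors the design choice in the patch: the log name is chosen where the scenario is defined, and the helper only checks that it does not overwrite an existing file in the output directory.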
From d29f884fb51b22947e7ba370ac5a093cda625393 Mon Sep 17 00:00:00 2001 From: Stuart Geipel Date: Thu, 5 Sep 2024 14:02:42 -0400 Subject: [PATCH 033/128] [system-probe] Add packet counting to existing UDP byte counters (#28546) --- pkg/network/ebpf/c/co-re/tracer-fentry.c | 4 +- pkg/network/ebpf/c/tracer.c | 12 +- pkg/network/tracer/tracer_linux_test.go | 140 ++++++++++++++++-- ...-udp-packet-counting-ebce8760bec29f50.yaml | 11 ++ 4 files changed, 141 insertions(+), 26 deletions(-) create mode 100644 releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index 857d2f922cde6..a5242b70662f9 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -201,7 +201,7 @@ int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset, return 0; } - return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk); + return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); } SEC("fexit/tcp_recvmsg") @@ -262,7 +262,7 @@ static __always_inline int handle_udp_send(struct sock *sk, int sent) { if (sent > 0) { log_debug("udp_sendmsg: sent: %d", sent); - handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk); + handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); } bpf_map_delete_elem(&udp_send_skb_args, &pid_tgid); diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 66482fb92a092..ad7f5fc0ad048 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -187,7 +187,7 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__udp_sendpage, int sent) { return 0; } - return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, skp); + return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, skp); } SEC("kprobe/tcp_done") @@ -420,7 +420,7 @@ static __always_inline int handle_ip6_skb(struct sock *sk, size_t size, struct f } log_debug("kprobe/ip6_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); - handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk); + handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); increment_telemetry_count(udp_send_processed); return 0; @@ -597,9 +597,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl log_debug("kprobe/ip_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); - // segment count is not currently enabled on prebuilt. - // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT - handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk); + handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); increment_telemetry_count(udp_send_processed); return 0; @@ -767,9 +765,7 @@ static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *ud bpf_map_delete_elem(udp_sock_map, &pid_tgid); log_debug("kretprobe/udp_recvmsg: pid_tgid: %llu, return: %d", pid_tgid, copied); - // segment count is not currently enabled on prebuilt. 
- // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT - handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_NONE, st->sk); + handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_INCREMENT, st->sk); return 0; } diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index f541f252cf8bf..dfa6036127c77 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -1127,31 +1127,61 @@ func (s *TracerSuite) TestSelfConnect() { }, 5*time.Second, 100*time.Millisecond, "could not find expected number of tcp connections, expected: 2") } -func (s *TracerSuite) TestUDPPeekCount() { - t := s.T() - config := testConfig() - tr := setupTracer(t, config) +// sets up two udp sockets talking to each other locally. +// returns (listener, dialer) +func setupUdpSockets(t *testing.T, udpnet, ip string) (*net.UDPConn, *net.UDPConn) { + serverAddr := fmt.Sprintf("%s:%d", ip, 0) - ln, err := net.ListenPacket("udp", "127.0.0.1:0") + laddr, err := net.ResolveUDPAddr(udpnet, serverAddr) require.NoError(t, err) - defer ln.Close() - saddr := ln.LocalAddr().String() + var ln, c *net.UDPConn = nil, nil + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if c != nil { + c.Close() + } + }) - laddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") + ln, err = net.ListenUDP(udpnet, laddr) require.NoError(t, err) - raddr, err := net.ResolveUDPAddr("udp", saddr) + + saddr := ln.LocalAddr().String() + + raddr, err := net.ResolveUDPAddr(udpnet, saddr) require.NoError(t, err) - c, err := net.DialUDP("udp", laddr, raddr) + c, err = net.DialUDP(udpnet, laddr, raddr) require.NoError(t, err) - defer c.Close() + + return ln, c +} + +func (s *TracerSuite) TestUDPPeekCount() { + t := s.T() + t.Run("v4", func(t *testing.T) { + testUDPPeekCount(t, "udp4", "127.0.0.1") + }) + t.Run("v6", func(t *testing.T) { + if !testConfig().CollectUDPv6Conns { + t.Skip("UDPv6 disabled") + } + testUDPPeekCount(t, "udp6", "[::1]") + }) +} +func testUDPPeekCount(t *testing.T, udpnet, ip string) { + config := testConfig() + tr := setupTracer(t, config) + + ln, c := setupUdpSockets(t, udpnet, ip) msg := []byte("asdf") - _, err = c.Write(msg) + _, err := c.Write(msg) require.NoError(t, err) - rawConn, err := ln.(*net.UDPConn).SyscallConn() + rawConn, err := ln.SyscallConn() require.NoError(t, err) err = rawConn.Control(func(fd uintptr) { buf := make([]byte, 1024) @@ -1204,12 +1234,82 @@ func (s *TracerSuite) TestUDPPeekCount() { m := outgoing.Monotonic require.Equal(t, len(msg), int(m.SentBytes)) require.Equal(t, 0, int(m.RecvBytes)) + require.Equal(t, 1, int(m.SentPackets)) + require.Equal(t, 0, int(m.RecvPackets)) require.True(t, outgoing.IntraHost) // make sure the inverse values are seen for the other message m = incoming.Monotonic require.Equal(t, 0, int(m.SentBytes)) require.Equal(t, len(msg), int(m.RecvBytes)) + require.Equal(t, 0, int(m.SentPackets)) + require.Equal(t, 1, int(m.RecvPackets)) + require.True(t, incoming.IntraHost) +} + +func (s *TracerSuite) TestUDPPacketSumming() { + t := s.T() + t.Run("v4", func(t *testing.T) { + testUDPPacketSumming(t, "udp4", "127.0.0.1") + }) + t.Run("v6", func(t *testing.T) { + if !testConfig().CollectUDPv6Conns { + t.Skip("UDPv6 disabled") + } + testUDPPacketSumming(t, "udp6", "[::1]") + }) +} +func testUDPPacketSumming(t *testing.T, udpnet, ip string) { + config := testConfig() + tr := setupTracer(t, config) + + ln, c := setupUdpSockets(t, udpnet, ip) + + msg := []byte("asdf") 
+ // send UDP packets of increasing length + for i := range msg { + _, err := c.Write(msg[:i+1]) + require.NoError(t, err) + } + expectedBytes := 1 + 2 + 3 + 4 + + buf := make([]byte, 256) + recvBytes := 0 + for range msg { + n, _, err := ln.ReadFrom(buf) + require.NoError(t, err) + recvBytes += n + } + // sanity check: did userspace get all four expected packets? + require.Equal(t, recvBytes, expectedBytes) + + var incoming *network.ConnectionStats + var outgoing *network.ConnectionStats + require.Eventuallyf(t, func() bool { + conns := getConnections(t, tr) + if outgoing == nil { + outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + } + if incoming == nil { + incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + } + + return outgoing != nil && incoming != nil + }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching") + + m := outgoing.Monotonic + require.Equal(t, expectedBytes, int(m.SentBytes)) + require.Equal(t, 0, int(m.RecvBytes)) + require.Equal(t, int(len(msg)), int(m.SentPackets)) + require.Equal(t, 0, int(m.RecvPackets)) + require.True(t, outgoing.IntraHost) + + // make sure the inverse values are seen for the other message + m = incoming.Monotonic + require.Equal(t, 0, int(m.SentBytes)) + require.Equal(t, expectedBytes, int(m.RecvBytes)) + require.Equal(t, 0, int(m.SentPackets)) + require.Equal(t, int(len(msg)), int(m.RecvPackets)) require.True(t, incoming.IntraHost) } @@ -1507,10 +1607,18 @@ func (s *TracerSuite) TestSendfileRegression() { }, 3*time.Second, 100*time.Millisecond, "couldn't find connections used by sendfile(2)") if assert.NotNil(t, outConn, "couldn't find outgoing connection used by sendfile(2)") { - assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile send data wasn't properly traced") + assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile sent bytes wasn't properly traced") + if connType == network.UDP { + assert.Equalf(t, int64(1), int64(outConn.Monotonic.SentPackets), "sendfile UDP should send exactly 1 packet") + assert.Equalf(t, int64(0), int64(outConn.Monotonic.RecvPackets), "sendfile outConn shouldn't have any RecvPackets") + } } if assert.NotNil(t, inConn, "couldn't find incoming connection used by sendfile(2)") { - assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv data wasn't properly traced") + assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv bytes wasn't properly traced") + if connType == network.UDP { + assert.Equalf(t, int64(1), int64(inConn.Monotonic.RecvPackets), "sendfile UDP should recv exactly 1 packet") + assert.Equalf(t, int64(0), int64(inConn.Monotonic.SentPackets), "sendfile inConn shouldn't have any SentPackets") + } } } @@ -1541,7 +1649,7 @@ func (s *TracerSuite) TestSendfileRegression() { t.Skip("UDP will fail with prebuilt tracer") } - // Start TCP server + // Start UDP server var rcvd int64 server := &UDPServer{ network: "udp" + strings.TrimPrefix(family.String(), "v"), diff --git a/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml new file mode 100644 index 0000000000000..19c9c532dfc58 --- /dev/null +++ b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + NPM - adds UDP "Packets Sent" and "Packets Received" to the network telemetry in Linux. From a8df5c5eab15bcb2a649c3475dda9fcba34d53bf Mon Sep 17 00:00:00 2001 From: Thibaud Cheruy Date: Thu, 5 Sep 2024 20:23:20 +0200 Subject: [PATCH 034/128] [SNMP] Recreate GoSNMP session at each check run (#29085) --- .../snmp/internal/devicecheck/devicecheck.go | 15 +++++++++------ .../snmp/internal/devicecheck/devicecheck_test.go | 6 ++++++ .../notes/snmp-session-8e4b5a534a8c9837.yaml | 11 +++++++++++ 3 files changed, 26 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go index 1ee597fec2e25..08a03370e2b7d 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go @@ -65,6 +65,7 @@ type DeviceCheck struct { config *checkconfig.CheckConfig sender *report.MetricSender session session.Session + sessionFactory session.Factory devicePinger pinger.Pinger sessionCloseErrorCount *atomic.Uint64 savedDynamicTags []string @@ -80,12 +81,8 @@ const cacheKeyPrefix = "snmp-tags" func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFactory session.Factory) (*DeviceCheck, error) { newConfig := config.CopyWithNewIP(ipAddress) - sess, err := sessionFactory(newConfig) - if err != nil { - return nil, fmt.Errorf("failed to configure session: %s", err) - } - var devicePinger pinger.Pinger + var err error if newConfig.PingEnabled { devicePinger, err = createPinger(newConfig.PingConfig) if err != nil { @@ -98,7 +95,7 @@ func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFa d := DeviceCheck{ config: newConfig, - session: sess, + sessionFactory: sessionFactory, devicePinger: devicePinger, sessionCloseErrorCount: atomic.NewUint64(0), nextAutodetectMetrics: timeNow(), @@ -160,6 +157,12 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error { startTime := time.Now() staticTags := append(d.config.GetStaticTags(), d.config.GetNetworkTags()...) 
+ var err error + d.session, err = d.sessionFactory(d.config) + if err != nil { + return err + } + // Fetch and report metrics var checkErr error var deviceStatus metadata.DeviceStatus diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index f06bd065e5059..4661a9f28689b 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -421,6 +421,9 @@ profiles: deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory) assert.Nil(t, err) + deviceCk.session, err = sessionFactory(config) + assert.Nil(t, err) + sender := mocksender.NewMockSender("123") // required to initiate aggregator deviceCk.SetSender(report.NewMetricSender(sender, "", nil, report.MakeInterfaceBandwidthState())) sess.On("GetNext", []string{"1.0"}).Return(session.CreateGetNextPacket("9999", gosnmp.EndOfMibView, nil), nil) @@ -912,6 +915,9 @@ community_string: public deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory) assert.Nil(t, err) + deviceCk.session, err = sessionFactory(config) + assert.Nil(t, err) + sender := mocksender.NewMockSender("123") // required to initiate aggregator sender.SetupAcceptAll() diff --git a/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml new file mode 100644 index 0000000000000..39e25e452387c --- /dev/null +++ b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix a bug preventing SNMP V3 reconnection. From b02662b2ba27d3fab6ef265f563fe2ec2ed7b540 Mon Sep 17 00:00:00 2001 From: Stanley Liu Date: Thu, 5 Sep 2024 16:10:12 -0400 Subject: [PATCH 035/128] Assign mapped hostname from OTLP logs in logs agent pipeline (#28870) --- .../components/exporter/logsagentexporter/logs_exporter.go | 3 +++ .../exporter/logsagentexporter/logs_exporter_test.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go index 7bf6c54154f23..04f8eaeae152b 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go @@ -94,6 +94,9 @@ func (e *Exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) (err error) { // ingestionTs is an internal field used for latency tracking on the status page, not the actual log timestamp. 
ingestionTs := time.Now().UnixNano() message := message.NewMessage(content, origin, status, ingestionTs) + if ddLog.Hostname != nil { + message.Hostname = *ddLog.Hostname + } e.logsAgentChannel <- message } diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go index 3944c650f7828..839b5a294c1d2 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go @@ -73,6 +73,7 @@ func TestLogsExporter(t *testing.T) { ldd := lrr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) ldd.Attributes().PutStr("message", "hello") ldd.Attributes().PutStr("datadog.log.source", "custom_source") + ldd.Attributes().PutStr("host.name", "test-host") return lrr }(), otelSource: otelSource, @@ -95,6 +96,8 @@ func TestLogsExporter(t *testing.T) { "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), "resource-attr": "resource-attr-val-1", + "host.name": "test-host", + "hostname": "test-host", }, }, expectedTags: [][]string{{"otel_source:datadog_agent"}}, From 5dfa1cdddb61522ec8d47aee99df72392942a3ad Mon Sep 17 00:00:00 2001 From: Ken Schneider <103530259+ken-schneider@users.noreply.github.com> Date: Thu, 5 Sep 2024 16:30:02 -0400 Subject: [PATCH 036/128] [NETPATH-278] Use default port for Network Traffic UDP traces (#29062) Co-authored-by: DeForest Richards <56796055+drichards-87@users.noreply.github.com> --- .../npcollector/npcollectorimpl/npcollector.go | 7 ++++++- .../npcollector/npcollectorimpl/npcollector_test.go | 2 +- ...orkpath-use-default-udp-port-4145a4b3700e98f4.yaml | 11 +++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go index f65bce267ee70..b1f1e089ef8be 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go @@ -120,8 +120,13 @@ func (s *npCollectorImpl) ScheduleConns(conns []*model.Connection) { startTime := s.TimeNowFn() for _, conn := range conns { remoteAddr := conn.Raddr - remotePort := uint16(conn.Raddr.GetPort()) protocol := convertProtocol(conn.GetType()) + var remotePort uint16 + // UDP traces should not be done to the active + // port + if protocol != payload.ProtocolUDP { + remotePort = uint16(conn.Raddr.GetPort()) + } if !shouldScheduleNetworkPathForConn(conn) { s.logger.Tracef("Skipped connection: addr=%s, port=%d, protocol=%s", remoteAddr, remotePort, protocol) continue diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go index 9fe8542ab59d2..770e90ed5e188 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go @@ -372,7 +372,7 @@ func Test_npCollectorImpl_ScheduleConns(t *testing.T) { }, }, expectedPathtests: []*common.Pathtest{ - {Hostname: "10.0.0.6", Port: uint16(161), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"}, + {Hostname: "10.0.0.6", Port: uint16(0), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"}, }, }, { diff --git 
a/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml new file mode 100644 index 0000000000000..fc143b588792e --- /dev/null +++ b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + The default UDP port for traceroute (port 33434) is now used for Network Traffic based paths, instead of the port detected by NPM. From 84a800fd9c72680b62511f29cedfeb0c92e88fc6 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Fri, 6 Sep 2024 00:08:48 +0300 Subject: [PATCH 037/128] service discovery: Add check configuration by default (#29077) --- .../dist/conf.d/service_discovery.d/conf.yaml.default | 2 ++ omnibus/config/software/datadog-agent-finalize.rb | 3 +++ .../corechecks/servicediscovery/servicediscovery.go | 7 ++++--- tasks/agent.py | 1 + .../agent-subcommands/configcheck/configcheck_nix_test.go | 8 +++++++- test/new-e2e/tests/discovery/linux_test.go | 4 ---- .../tests/discovery/testdata/config/check_config.yaml | 1 - 7 files changed, 17 insertions(+), 9 deletions(-) create mode 100644 cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default delete mode 100644 test/new-e2e/tests/discovery/testdata/config/check_config.yaml diff --git a/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default new file mode 100644 index 0000000000000..00d9a2dbba2c8 --- /dev/null +++ b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default @@ -0,0 +1,2 @@ +instances: + - {} diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index b6df0ab46e821..2d545f58b498b 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -52,6 +52,9 @@ # load isn't supported by windows delete "#{conf_dir}/load.d" + # service_discovery isn't supported by windows + delete "#{conf_dir}/service_discovery.d" + # Remove .pyc files from embedded Python command "del /q /s #{windows_safe_path(install_dir)}\\*.pyc" end diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index 61e97ca5de80c..d395f28599b6b 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -101,9 +101,6 @@ func newCheck() check.Check { // Configure parses the check configuration and initializes the check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instanceConfig, initConfig integration.Data, source string) error { - if !pkgconfig.SystemProbe().GetBool("discovery.enabled") { - return errors.New("service discovery is disabled") - } if newOSImpl == nil { return errors.New("service_discovery check not implemented on " + runtime.GOOS) } @@ -135,6 +132,10 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance // Run executes the check. 
func (c *Check) Run() error { + if !pkgconfig.SystemProbe().GetBool("discovery.enabled") { + return nil + } + start := time.Now() defer func() { diff := time.Since(start).Seconds() diff --git a/tasks/agent.py b/tasks/agent.py index c92f5d8399782..8474176fc244f 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -76,6 +76,7 @@ "orchestrator_ecs", "cisco_sdwan", "network_path", + "service_discovery", ] WINDOWS_CORECHECKS = [ diff --git a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go index 74562bfff59eb..6f6e744e09bcb 100644 --- a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go @@ -25,7 +25,7 @@ func TestLinuxConfigCheckSuite(t *testing.T) { e2e.Run(t, &linuxConfigCheckSuite{}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake())) } -// cpu, disk, file_handle, io, load, memory, network, ntp, uptime +// cpu, disk, file_handle, io, load, memory, network, ntp, uptime, service_discovery func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { testChecks := []CheckConfigOutput{ { @@ -82,6 +82,12 @@ func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { InstanceID: "uptime:", Settings: "{}", }, + { + CheckName: "service_discovery", + Filepath: "file:/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml.default", + InstanceID: "service_discovery:", + Settings: "{}", + }, } output := v.Env().Agent.Client.ConfigCheck() diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index fe0f98a38d142..d48cc30962b7c 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ b/test/new-e2e/tests/discovery/linux_test.go @@ -29,9 +29,6 @@ var agentConfigStr string //go:embed testdata/config/system_probe_config.yaml var systemProbeConfigStr string -//go:embed testdata/config/check_config.yaml -var checkConfigStr string - type linuxTestSuite struct { e2e.BaseSuite[environments.Host] } @@ -42,7 +39,6 @@ func TestLinuxTestSuite(t *testing.T) { agentParams := []func(*agentparams.Params) error{ agentparams.WithAgentConfig(agentConfigStr), agentparams.WithSystemProbeConfig(systemProbeConfigStr), - agentparams.WithFile("/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml", checkConfigStr, true), } options := []e2e.SuiteOption{ e2e.WithProvisioner(awshost.Provisioner(awshost.WithAgentOptions(agentParams...))), diff --git a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml b/test/new-e2e/tests/discovery/testdata/config/check_config.yaml deleted file mode 100644 index acab3a6421cab..0000000000000 --- a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml +++ /dev/null @@ -1 +0,0 @@ -instances: [{}] From 5e0f456c15bb26f991cd1cbfdb6978a90fe4a654 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 07:30:37 +0000 Subject: [PATCH 038/128] CWS: sync BTFHub constants (#29096) Co-authored-by: --- .../probe/constantfetch/btfhub/constants.json | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 6bd99c787014d..7bedf2df1b260 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -11855,6 +11855,13 @@ "uname_release": "4.14.350-266.564.amzn2.aarch64", 
"cindex": 3 }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-267.564.amzn2.aarch64", + "cindex": 3 + }, { "distrib": "amzn", "version": "2", @@ -12653,6 +12660,13 @@ "uname_release": "4.14.350-266.564.amzn2.x86_64", "cindex": 8 }, + { + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.352-267.564.amzn2.x86_64", + "cindex": 8 + }, { "distrib": "amzn", "version": "2", @@ -20801,6 +20815,13 @@ "uname_release": "4.1.12-124.88.3.el7uek.x86_64", "cindex": 94 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.89.4.el7uek.x86_64", + "cindex": 94 + }, { "distrib": "ol", "version": "7", @@ -23657,6 +23678,13 @@ "uname_release": "4.14.35-2047.540.4.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.1.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", From 411365eabc9e41a88d1c63883adbe9ee809d77b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Fri, 6 Sep 2024 10:02:23 +0200 Subject: [PATCH 039/128] [EBPF] kmt: improve retry logic and error management (#28998) --- test/new-e2e/go.mod | 1 - test/new-e2e/go.sum | 2 - .../system-probe/connector/metric/metric.go | 25 ++++ test/new-e2e/system-probe/errors.go | 110 ++++++++++++++-- test/new-e2e/system-probe/errors_test.go | 105 ++++++++++++++++ .../system-probe/system-probe-test-env.go | 117 +++++++++++------- 6 files changed, 302 insertions(+), 58 deletions(-) create mode 100644 test/new-e2e/system-probe/errors_test.go diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index dd5a7b64ddc56..5c978c93c1ca3 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -52,7 +52,6 @@ require ( github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 github.com/pulumi/pulumi/sdk/v3 v3.130.0 github.com/samber/lo v1.47.0 - github.com/sethvargo/go-retry v0.2.4 github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.26.0 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index b8993893b924a..30a72e12faf32 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -448,8 +448,6 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= -github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= diff --git a/test/new-e2e/system-probe/connector/metric/metric.go b/test/new-e2e/system-probe/connector/metric/metric.go index 4c674154402cd..dc90a56bfe270 100644 --- a/test/new-e2e/system-probe/connector/metric/metric.go +++ b/test/new-e2e/system-probe/connector/metric/metric.go @@ -15,6 +15,7 @@ import ( "os" "github.com/DataDog/datadog-api-client-go/v2/api/datadog" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" 
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" ) @@ -41,3 +42,27 @@ func SubmitExecutionMetric(metricBody datadogV2.MetricPayload) error { return nil } + +// SubmitExecutionEvent accepts events and sends it to Datadog. +func SubmitExecutionEvent(eventBody datadogV1.EventCreateRequest) error { + if _, ok := os.LookupEnv("DD_API_KEY"); !ok { + fmt.Fprintf(os.Stderr, "skipping sending metric because DD_API_KEY not present") + return nil + } + + ctx := datadog.NewDefaultContext(context.Background()) + configuration := datadog.NewConfiguration() + apiClient := datadog.NewAPIClient(configuration) + api := datadogV1.NewEventsApi(apiClient) + resp, r, err := api.CreateEvent(ctx, eventBody) + + if err != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r) + return fmt.Errorf("error when calling `MetricsApi.SubmitMetrics`: %v", err) + } + + responseContent, _ := json.MarshalIndent(resp, "", " ") + fmt.Fprintf(os.Stdout, "Response from `MetricsApi.SubmitMetrics`:\n%s\n", responseContent) + + return nil +} diff --git a/test/new-e2e/system-probe/errors.go b/test/new-e2e/system-probe/errors.go index 2fb1b57595e9a..91cb123602fe2 100644 --- a/test/new-e2e/system-probe/errors.go +++ b/test/new-e2e/system-probe/errors.go @@ -13,13 +13,16 @@ import ( "log" "os" "path" + "regexp" "strings" "time" "github.com/DataDog/datadog-api-client-go/api/v1/datadog" "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" - "github.com/sethvargo/go-retry" + "github.com/pulumi/pulumi/sdk/v3/go/auto" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" "github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric" ) @@ -27,6 +30,7 @@ const ( // bitmap of actions to take for an error retryStack = 0x1 // 0b01 emitMetric = 0x2 // 0b10 + changeAZ = 0x4 // 0b100 aria2cMissingStatusErrorStr = "error: wait: remote command exited without exit status or exit signal: running \" aria2c" @@ -66,7 +70,7 @@ var handledErrorsLs = []handledError{ errorType: insufficientCapacityError, errorString: "InsufficientInstanceCapacity", metric: "insufficient-capacity", - action: retryStack | emitMetric, + action: retryStack | emitMetric | changeAZ, }, // Retry when ssh thinks aria2c exited without status. This may happen // due to network connectivity issues if ssh keepalive mecahnism fails. @@ -80,7 +84,7 @@ var handledErrorsLs = []handledError{ errorType: ec2StateChangeTimeoutError, errorString: "timeout while waiting for state to become 'running'", metric: "ec2-timeout-state-change", - action: retryStack | emitMetric, + action: retryStack | emitMetric | changeAZ, }, { errorType: ioTimeout, @@ -102,6 +106,15 @@ var handledErrorsLs = []handledError{ }, } +type retryHandler struct { + currentAZ int + maxRetries int + retryDelay time.Duration + allErrors []error + configMap runner.ConfigMap + infraEnv string +} + func errorMetric(errType string) datadogV2.MetricPayload { tags := []string{ fmt.Sprintf("error:%s", errType), @@ -123,15 +136,29 @@ func errorMetric(errType string) datadogV2.MetricPayload { } } -func handleScenarioFailure(err error, changeRetryState func(handledError)) error { +func (r *retryHandler) HandleError(err error, retryCount int) (infra.RetryType, []infra.GetStackOption) { + r.allErrors = append(r.allErrors, err) + + if retryCount > r.maxRetries { + log.Printf("environment setup error: %v. 
Maximum number of retries (%d) exceeded, failing setup.\n", err, r.maxRetries) + return infra.NoRetry, nil + } + + var newOpts []infra.GetStackOption + retry := infra.NoRetry errStr := err.Error() for _, e := range handledErrorsLs { if !strings.Contains(errStr, e.errorString) { continue } - // modify any state within the retry block - changeRetryState(e) + if (e.action & changeAZ) != 0 { + r.currentAZ++ + if az := getAvailabilityZone(r.infraEnv, r.currentAZ); az != "" { + r.configMap["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az} + newOpts = append(newOpts, infra.WithConfigMap(r.configMap)) + } + } if (e.action & emitMetric) != 0 { submitError := metric.SubmitExecutionMetric(errorMetric(e.metric)) @@ -145,15 +172,19 @@ func handleScenarioFailure(err error, changeRetryState func(handledError)) error } if (e.action & retryStack) != 0 { - log.Printf("environment setup error: %v. Retrying stack.\n", err) - return retry.RetryableError(err) + retry = infra.ReUp } break } - log.Printf("environment setup error: %v. Failing stack.\n", err) - return err + log.Printf("environment setup error. Retry strategy: %s.\n", retry) + if retry != infra.NoRetry { + log.Printf("waiting %s before retrying...\n", r.retryDelay) + time.Sleep(r.retryDelay) + } + + return retry, newOpts } func storeErrorReasonForCITags(reason string) error { @@ -177,3 +208,62 @@ func storeNumberOfRetriesForCITags(retries int) error { _, err = f.WriteString(fmt.Sprintf("%d", retries)) return err } + +type pulumiError struct { + command string + arch string + vmCommand string + errorMessage string + vmName string +} + +var commandRegex = regexp.MustCompile(`^ command:remote:Command \(([^\)]+)\):$`) +var archRegex = regexp.MustCompile(`distro_(arm64|x86_64)`) +var vmCmdRegex = regexp.MustCompile(`-cmd-.+-ddvm-\d+-\d+-(.+)$`) +var vmNameRegex = regexp.MustCompile(`-([^-]+)-distro`) + +func parsePulumiDiagnostics(message string) *pulumiError { + var perr pulumiError + lines := strings.Split(message, "\n") + inDiagnostics := false + for _, line := range lines { + if !inDiagnostics { + if line == "Diagnostics:" { + // skip until next line + inDiagnostics = true + } + continue + } + + if len(line) == 0 || line[0] != ' ' { + // Finished reading diagnostics, break out of the loop + return &perr + } + + if perr.command == "" { + commandMatch := commandRegex.FindStringSubmatch(line) + if commandMatch != nil { + perr.command = commandMatch[1] + + archMatch := archRegex.FindStringSubmatch(perr.command) + if archMatch != nil { + perr.arch = archMatch[1] + } + + vmCmdMatch := vmCmdRegex.FindStringSubmatch(perr.command) + if vmCmdMatch != nil { + perr.vmCommand = vmCmdMatch[1] + } + + vmNameMatch := vmNameRegex.FindStringSubmatch(perr.command) + if vmNameMatch != nil { + perr.vmName = vmNameMatch[1] + } + } + } else { + perr.errorMessage += strings.Trim(line, " ") + "\n" + } + } + + return nil +} diff --git a/test/new-e2e/system-probe/errors_test.go b/test/new-e2e/system-probe/errors_test.go new file mode 100644 index 0000000000000..eeee52d43fb4f --- /dev/null +++ b/test/new-e2e/system-probe/errors_test.go @@ -0,0 +1,105 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows + +package systemprobe + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const output = ` +Updating (gjulian-guillermo.julian-e2e-report-all-errors-ddvm): + + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running + pulumi:providers:random random +@ updating.... + dd:Host aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64 + pulumi:providers:aws aws + pulumi:providers:command command + random:index:RandomShuffle aws-rnd-subnet + random:index:RandomString random-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-random-string-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192 + command:local:Command local-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-gen-libvirt-sshkey + aws:ec2:Instance aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-wait-cloud-init + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-write-ssh-key + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-write-vol-xml + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-dir + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-allow-ssh-env + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-distro_arm64-download-with-curl + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-write-vol-xml + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-config + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-reload sshd + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-extract-base-volume-package + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-extract-base-volume-package + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-read-microvm-ssh-key + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-write-pool-xml + pulumi:providers:libvirt gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-libvirt-provider + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-define-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-build-libvirt-pool + command:remote:Command 
remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-start-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-refresh-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-build-libvirt-basevolume + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-build-libvirt-basevolume +@ updating..... + libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192 + libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-create-nvram + libvirt:index:Network gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-network-distro_arm64-arm64 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-tcp-100.1.0.0/24 + libvirt:index:Domain arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-udp-100.1.0.0/24 + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (0s) +@ updating..... 
+ + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) bash: line 1: caca: command not found + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker": + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb **creating failed** error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker": + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running error: update failed + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm **failed** 1 error +Diagnostics: + command:remote:Command (remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb): + error: Process exited with status 127: running " nocommand /mnt/docker && mount /dev/vdb /mnt/docker": + bash: line 1: nocommand: command not found + + pulumi:pulumi:Stack (e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm): + error: update failed + +Outputs: + kmt-stack: (json) { + arm64: { + ip : "172.29.176.14" + microvms: [ + [0]: { + id : "arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192" + ip : "100.1.0.2" + ssh-key-path: "/home/kernel-version-testing/ddvm_rsa" + tag : "ubuntu_22.04" + vmset-tags : [ + [0]: "distro_arm64" + ] + } + ] + } + } + + +Resources: + 36 unchanged + +Duration: 6s +` + +func TestParseDiagnostics(t *testing.T) { + result := parsePulumiDiagnostics(output) + require.NotNil(t, result) + require.Equal(t, "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", result.command) + require.Equal(t, "arm64", result.arch) + require.Equal(t, "mount-disk-dev-vdb", result.vmCommand) + require.Equal(t, "error: Process exited with status 127: running \" nocommand /mnt/docker && mount /dev/vdb /mnt/docker\":\nbash: line 1: nocommand: command not found\n", result.errorMessage) + require.Equal(t, "ubuntu_22.04", result.vmName) +} diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index 5a7a00321707d..516f47d76244a 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -21,12 +21,13 @@ import ( "syscall" "time" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" "github.com/DataDog/test-infra-definitions/scenarios/aws/microVMs/microvms" - "github.com/sethvargo/go-retry" "golang.org/x/term" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric" "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" @@ -176,6 +177,10 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* return nil, 
fmt.Errorf("No API Key for datadog-agent provided") } + ciJob := getEnv("CI_JOB_ID", "") + ciPipeline := getEnv("CI_PIPELINE_ID", "") + ciBranch := getEnv("CI_COMMIT_REF_NAME", "") + var customAMILocalWorkingDir string // Remote AMI working dir is always on Linux @@ -239,60 +244,43 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* config["ddinfra:extraResourcesTags"] = auto.ConfigValue{Value: envVars} } - var upResult auto.UpResult - var pulumiStack *auto.Stack - ctx := context.Background() - currentAZ := 0 // PrimaryAZ - b := retry.NewConstant(3 * time.Second) - // Retry 4 times. This allows us to cycle through all AZs, and handle libvirt - // connection issues in the worst case. - b = retry.WithMaxRetries(4, b) - numRetries := 0 - retryErr := retry.Do(ctx, b, func(_ context.Context) error { - if az := getAvailabilityZone(opts.InfraEnv, currentAZ); az != "" { - config["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az} - } - - pulumiStack, upResult, err = stackManager.GetStackNoDeleteOnFailure( - systemProbeTestEnv.context, - systemProbeTestEnv.name, - func(ctx *pulumi.Context) error { - if err := microvms.Run(ctx); err != nil { - return fmt.Errorf("setup micro-vms in remote instance: %w", err) - } - return nil - }, - infra.WithFailOnMissing(opts.FailOnMissing), - infra.WithConfigMap(config), - ) + retryHandler := retryHandler{ + currentAZ: 0, + maxRetries: 4, + retryDelay: 3 * time.Second, + configMap: config, + infraEnv: opts.InfraEnv, + } - if err != nil { - numRetries++ - return handleScenarioFailure(err, func(possibleError handledError) { - // handle the following errors by trying in a different availability zone - if possibleError.errorType == insufficientCapacityError || - possibleError.errorType == ec2StateChangeTimeoutError { - currentAZ++ - } - }) - } + stackManager.RetryStrategy = retryHandler.HandleError + pulumiStack, upResult, pulumiErr := stackManager.GetStackNoDeleteOnFailure( + systemProbeTestEnv.context, + systemProbeTestEnv.name, + func(ctx *pulumi.Context) error { + if err := microvms.Run(ctx); err != nil { + return fmt.Errorf("setup micro-vms in remote instance: %w", err) + } + return nil + }, + infra.WithFailOnMissing(opts.FailOnMissing), + infra.WithConfigMap(config), + ) + if pulumiErr != nil { // Mark the test as successful, just in case we succeeded after a retry - err = storeErrorReasonForCITags("") + err := storeErrorReasonForCITags("") if err != nil { log.Printf("failed to store error reason for CI tags: %v", err) } + } - return nil - }) - - err = storeNumberOfRetriesForCITags(numRetries) + err = storeNumberOfRetriesForCITags(len(retryHandler.allErrors)) if err != nil { log.Printf("failed to store number of retries for CI tags: %v", err) } outputs := upResult.Outputs - if retryErr != nil { + if pulumiErr != nil { // pulumi does not populate `UpResult` with the stack output if the // update process failed. In this case we must manually fetch the outputs. 
outputs, err = pulumiStack.Outputs(context.Background()) @@ -305,8 +293,47 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* if err != nil { err = fmt.Errorf("failed to write stack output to file: %w", err) } - if retryErr != nil { - return nil, errors.Join(fmt.Errorf("failed to create stack: %w", retryErr), err) + if pulumiErr != nil { + for i, retryErr := range retryHandler.allErrors { + pulumiError := parsePulumiDiagnostics(retryErr.Error()) + if pulumiError != nil { + log.Printf("pulumi error on retry %d:\n\tcommand: %s\n\tvm-command: %s\n\terror message:\n%s\n\n", i, pulumiError.command, pulumiError.vmCommand, pulumiError.errorMessage) + + // Send the error as a DD event so we can track it + event := datadogV1.EventCreateRequest{ + Title: "[KMT] Environment setup error", + Text: pulumiError.errorMessage, + Tags: []string{ + "test:kmt", + "source:pulumi", + "repository:datadog/datadog-agent", + "team:ebpf-platform", + fmt.Sprintf("vm.name:%s", pulumiError.vmName), + fmt.Sprintf("vm.arch:%s", pulumiError.arch), + fmt.Sprintf("vm.command:%s", pulumiError.vmCommand), + }, + } + + if ciJob != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.job.id:%s", ciJob)) + } + + if ciPipeline != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.pipeline.id:%s", ciPipeline)) + } + + if ciBranch != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.branch:%s", ciBranch)) + } + + if err = metric.SubmitExecutionEvent(event); err != nil { + log.Printf("failed to submit environment setup error event: %v", err) + } + } else { + log.Printf("unknown/couldn't parse error on retry %d", i) + } + } + return nil, errors.Join(fmt.Errorf("failed to create stack: %w", pulumiErr), err) } systemProbeTestEnv.StackOutput = upResult From aa350c1d12e57942b024ea2fc0448dabaa382a80 Mon Sep 17 00:00:00 2001 From: David Ortiz Date: Fri, 6 Sep 2024 10:45:15 +0200 Subject: [PATCH 040/128] [clusteragent/admission/mutate] Add option to create socket volumes (#29076) --- .../mutate/agent_sidecar/providers.go | 48 +++---- .../mutate/agent_sidecar/providers_test.go | 133 +++++++++++++++++- .../admission/mutate/config/config.go | 71 ++++++++-- .../admission/mutate/config/config_test.go | 69 ++++++++- pkg/config/config_template.yaml | 9 ++ pkg/config/setup/config.go | 1 + ...ols-with-type-socket-dd57e8c0d3bb2c51.yaml | 17 +++ 7 files changed, 308 insertions(+), 40 deletions(-) create mode 100644 releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go index 8e8d424e17eef..d05d123a852f2 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go @@ -9,6 +9,7 @@ package agentsidecar import ( "fmt" + "slices" corev1 "k8s.io/api/core/v1" @@ -33,6 +34,12 @@ const dogstatsdSocket = socketDir + "/dsd.socket" // webhook to distinguish them easily. 
const ddSocketsVolumeName = "ddsockets" +var volumeNamesInjectedByConfigWebhook = []string{ + configWebhook.DatadogVolumeName, + configWebhook.DogstatsdSocketVolumeName, + configWebhook.TraceAgentSocketVolumeName, +} + // providerIsSupported indicates whether the provider is supported by agent sidecar injection func providerIsSupported(provider string) bool { switch provider { @@ -85,10 +92,7 @@ func applyFargateOverrides(pod *corev1.Pod) (bool, error) { return false, fmt.Errorf("can't apply profile overrides to nil pod") } - mutated := false - - deleted := deleteConfigWebhookVolumeAndMounts(pod) - mutated = mutated || deleted + mutated := deleteConfigWebhookVolumesAndMounts(pod) volume, volumeMount := socketsVolume() injected := common.InjectVolume(pod, volume, volumeMount) @@ -174,20 +178,19 @@ func socketsVolume() (corev1.Volume, corev1.VolumeMount) { return volume, volumeMount } -// deleteConfigWebhookVolumeAndMounts deletes the volume and volumeMounts added +// deleteConfigWebhookVolumesAndMounts deletes the volume and volumeMounts added // by the config webhook. Returns a boolean that indicates if the pod was // mutated. -func deleteConfigWebhookVolumeAndMounts(pod *corev1.Pod) bool { - mutated := false - +func deleteConfigWebhookVolumesAndMounts(pod *corev1.Pod) bool { + originalNumberOfVolumes := len(pod.Spec.Volumes) // Delete the volume added by the config webhook - for i, vol := range pod.Spec.Volumes { - if vol.Name == configWebhook.DatadogVolumeName { - pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...) - mutated = true - break - } - } + pod.Spec.Volumes = slices.DeleteFunc( + pod.Spec.Volumes, + func(volume corev1.Volume) bool { + return slices.Contains(volumeNamesInjectedByConfigWebhook, volume.Name) + }, + ) + mutated := len(pod.Spec.Volumes) != originalNumberOfVolumes deleted := deleteConfigWebhookVolumeMounts(pod.Spec.Containers) mutated = mutated || deleted @@ -204,16 +207,11 @@ func deleteConfigWebhookVolumeMounts(containers []corev1.Container) bool { mutated := false for i, container := range containers { - for j, volMount := range container.VolumeMounts { - if volMount.Name == configWebhook.DatadogVolumeName { - containers[i].VolumeMounts = append( - containers[i].VolumeMounts[:j], - containers[i].VolumeMounts[j+1:]..., - ) - mutated = true - break - } - } + originalNumberOfVolMounts := len(container.VolumeMounts) + containers[i].VolumeMounts = slices.DeleteFunc(container.VolumeMounts, func(volMount corev1.VolumeMount) bool { + return slices.Contains(volumeNamesInjectedByConfigWebhook, volMount.Name) + }) + mutated = mutated || len(container.VolumeMounts) != originalNumberOfVolMounts } return mutated diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go index ee8ceb1a544f5..236d9d3257031 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go @@ -59,7 +59,6 @@ func TestProviderIsSupported(t *testing.T) { func TestApplyProviderOverrides(t *testing.T) { mockConfig := configmock.New(t) - hostPathType := corev1.HostPathDirectoryOrCreate tests := []struct { name string @@ -170,7 +169,7 @@ func TestApplyProviderOverrides(t *testing.T) { { // This test checks that the volume and volume mounts set by the // config webhook are replaced by ones that works on Fargate. 
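Both deletion helpers in this hunk lean on the standard-library slices package (Go 1.21+): slices.DeleteFunc drops matching elements in place and returns the shortened slice, so comparing the length before and after is all that is needed to report whether the pod spec was mutated. A self-contained illustration of that pattern, with placeholder volume names standing in for the injected ones:

package main

import (
	"fmt"
	"slices"
)

func main() {
	injected := []string{"datadog", "datadog-dogstatsd", "datadog-trace-agent"}
	volumes := []string{"app-data", "datadog-dogstatsd", "datadog-trace-agent", "tmp"}

	before := len(volumes)
	// DeleteFunc keeps only the elements for which the predicate is false and
	// returns the (possibly shorter) slice, reusing the original backing array.
	volumes = slices.DeleteFunc(volumes, func(name string) bool {
		return slices.Contains(injected, name)
	})
	mutated := len(volumes) != before

	fmt.Println(volumes, mutated) // [app-data tmp] true
}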
- name: "fargate provider - with volume set by the config webhook", + name: "fargate provider - with volume set by the config webhook (when the type is not socket)", provider: "fargate", basePod: &corev1.Pod{ Spec: corev1.PodSpec{ @@ -201,7 +200,7 @@ func TestApplyProviderOverrides(t *testing.T) { Name: "datadog", VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ - Type: &hostPathType, + Type: pointer.Ptr(corev1.HostPathDirectoryOrCreate), Path: "/var/run/datadog", }, }, @@ -276,6 +275,134 @@ func TestApplyProviderOverrides(t *testing.T) { expectError: false, expectMutated: true, }, + { + // Same as the previous test, but this time the injected volumes are + // of socket type. + name: "fargate provider - with volumes set by the config webhook (when the type is socket)", + provider: "fargate", + basePod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app-container", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + }, + }, + { + Name: agentSidecarContainerName, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "datadog-dogstatsd", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/dsd.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + { + Name: "datadog-trace-agent", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/apm.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + }, + }, + }, + expectedPodAfterOverride: &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, + }, + Spec: corev1.PodSpec{ + ShareProcessNamespace: pointer.Ptr(true), + Containers: []corev1.Container{ + { + Name: "app-container", + Env: []corev1.EnvVar{ + { + Name: "DD_TRACE_AGENT_URL", + Value: "unix:///var/run/datadog/apm.socket", + }, + { + Name: "DD_DOGSTATSD_URL", + Value: "unix:///var/run/datadog/dsd.socket", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "ddsockets", + MountPath: "/var/run/datadog", + ReadOnly: false, + }, + }, + }, + { + Name: agentSidecarContainerName, + Env: []corev1.EnvVar{ + { + Name: "DD_EKS_FARGATE", + Value: "true", + }, + { + Name: "DD_APM_RECEIVER_SOCKET", + Value: "/var/run/datadog/apm.socket", + }, + { + Name: "DD_DOGSTATSD_SOCKET", + Value: "/var/run/datadog/dsd.socket", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "ddsockets", + MountPath: "/var/run/datadog", + ReadOnly: false, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "ddsockets", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + expectError: false, + expectMutated: true, + }, { name: "unsupported provider", provider: "foo-provider", diff --git a/pkg/clusteragent/admission/mutate/config/config.go b/pkg/clusteragent/admission/mutate/config/config.go index 0d43bdfb3c229..8321f8f531e15 100644 --- a/pkg/clusteragent/admission/mutate/config/config.go +++ b/pkg/clusteragent/admission/mutate/config/config.go @@ 
-50,9 +50,15 @@ const ( socket = "socket" service = "service" - // DatadogVolumeName is the name of the volume used to mount the socket + // DatadogVolumeName is the name of the volume used to mount the sockets when the volume source is a directory DatadogVolumeName = "datadog" + // TraceAgentSocketVolumeName is the name of the volume used to mount the trace agent socket + TraceAgentSocketVolumeName = "datadog-trace-agent" + + // DogstatsdSocketVolumeName is the name of the volume used to mount the dogstatsd socket + DogstatsdSocketVolumeName = "datadog-dogstatsd" + webhookName = "agent_config" ) @@ -184,15 +190,10 @@ func (w *Webhook) inject(pod *corev1.Pod, _ string, _ dynamic.Interface) (bool, case service: injectedConfig = common.InjectEnv(pod, agentHostServiceEnvVar) case socket: - volume, volumeMount := buildVolume(DatadogVolumeName, config.Datadog().GetString("admission_controller.inject_config.socket_path"), true) - injectedVol := common.InjectVolume(pod, volume, volumeMount) - if injectedVol { - common.MarkVolumeAsSafeToEvictForAutoscaler(pod, DatadogVolumeName) - } - + injectedVolumes := injectSocketVolumes(pod) injectedEnv := common.InjectEnv(pod, traceURLSocketEnvVar) injectedEnv = common.InjectEnv(pod, dogstatsdURLSocketEnvVar) || injectedEnv - injectedConfig = injectedEnv || injectedVol + injectedConfig = injectedVolumes || injectedEnv default: log.Errorf("invalid injection mode %q", w.mode) return false, errors.New(metrics.InvalidInput) @@ -249,14 +250,13 @@ func injectExternalDataEnvVar(pod *corev1.Pod) (injected bool) { return } -func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1.VolumeMount) { - pathType := corev1.HostPathDirectoryOrCreate +func buildVolume(volumeName, path string, hostpathType corev1.HostPathType, readOnly bool) (corev1.Volume, corev1.VolumeMount) { volume := corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ Path: path, - Type: &pathType, + Type: &hostpathType, }, }, } @@ -269,3 +269,52 @@ func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1. return volume, volumeMount } + +// injectSocketVolumes injects the volumes for the dogstatsd and trace agent +// sockets. +// +// The type of the volume injected can be either a directory or a socket +// depending on the configuration. They offer different trade-offs. Using a +// socket ensures no lost traces or dogstatsd metrics but can cause the pod to +// wait if the agent has issues that prevent it from creating the sockets. +// +// This function returns true if at least one volume was injected. 
+func injectSocketVolumes(pod *corev1.Pod) bool { + var injectedVolNames []string + + if config.Datadog().GetBool("admission_controller.inject_config.type_socket_volumes") { + volumes := map[string]string{ + DogstatsdSocketVolumeName: strings.TrimPrefix( + config.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), "unix://", + ), + TraceAgentSocketVolumeName: strings.TrimPrefix( + config.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), "unix://", + ), + } + + for volumeName, volumePath := range volumes { + volume, volumeMount := buildVolume(volumeName, volumePath, corev1.HostPathSocket, true) + injectedVol := common.InjectVolume(pod, volume, volumeMount) + if injectedVol { + injectedVolNames = append(injectedVolNames, volumeName) + } + } + } else { + volume, volumeMount := buildVolume( + DatadogVolumeName, + config.Datadog().GetString("admission_controller.inject_config.socket_path"), + corev1.HostPathDirectoryOrCreate, + true, + ) + injectedVol := common.InjectVolume(pod, volume, volumeMount) + if injectedVol { + injectedVolNames = append(injectedVolNames, DatadogVolumeName) + } + } + + for _, volName := range injectedVolNames { + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, volName) + } + + return len(injectedVolNames) > 0 +} diff --git a/pkg/clusteragent/admission/mutate/config/config_test.go b/pkg/clusteragent/admission/mutate/config/config_test.go index c8dd5437edf85..6321412ac1d0b 100644 --- a/pkg/clusteragent/admission/mutate/config/config_test.go +++ b/pkg/clusteragent/admission/mutate/config/config_test.go @@ -10,6 +10,7 @@ package config import ( "encoding/json" "os" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -307,6 +308,7 @@ func TestInjectSocket(t *testing.T) { injected, err := webhook.inject(pod, "", nil) assert.Nil(t, err) assert.True(t, injected) + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket")) assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", "unix:///var/run/datadog/dsd.socket")) assert.Equal(t, pod.Spec.Containers[0].VolumeMounts[0].MountPath, "/var/run/datadog") @@ -318,6 +320,67 @@ func TestInjectSocket(t *testing.T) { assert.Equal(t, "datadog", pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation]) } +func TestInjectSocket_VolumeTypeSocket(t *testing.T) { + pod := mutatecommon.FakePodWithContainer("foo-pod", corev1.Container{}) + pod = mutatecommon.WithLabels(pod, map[string]string{"admission.datadoghq.com/enabled": "true", "admission.datadoghq.com/config.mode": "socket"}) + wmeta := fxutil.Test[workloadmeta.Component]( + t, + core.MockBundle(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + fx.Replace(config.MockParams{ + Overrides: map[string]interface{}{"admission_controller.inject_config.type_socket_volumes": true}, + }), + ) + webhook := NewWebhook(wmeta, autoinstrumentation.GetInjectionFilter()) + injected, err := webhook.inject(pod, "", nil) + assert.Nil(t, err) + assert.True(t, injected) + + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket")) + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", "unix:///var/run/datadog/dsd.socket")) + + expectedVolumeMounts := []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + 
Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + } + assert.ElementsMatch(t, pod.Spec.Containers[0].VolumeMounts, expectedVolumeMounts) + + expectedVolumes := []corev1.Volume{ + { + Name: "datadog-dogstatsd", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/dsd.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + { + Name: "datadog-trace-agent", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/apm.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + } + assert.ElementsMatch(t, pod.Spec.Volumes, expectedVolumes) + + safeToEvictVolumes := strings.Split(pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation], ",") + assert.Len(t, safeToEvictVolumes, 2) + assert.Contains(t, safeToEvictVolumes, "datadog-dogstatsd") + assert.Contains(t, safeToEvictVolumes, "datadog-trace-agent") +} + func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -339,7 +402,11 @@ func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) { VolumeMounts: []corev1.VolumeMount{ { Name: "foo", - MountPath: "/var/run/datadog", + MountPath: "/var/run/datadog/dsd.socket", + }, + { + Name: "bar", + MountPath: "/var/run/datadog/apm.socket", }, }, }, diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index e01a2e747831b..7b085ec603a1b 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -2987,6 +2987,15 @@ api_key: # # trace_agent_socket: unix:///var/run/datadog/apm.socket + ## @param type_socket_volumes - boolean - optional - default: false + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TYPE_SOCKET_VOLUMES - boolean - optional - default: false + ## When enabled, injected volumes are of type "Socket". This means that + ## injected pods will not start until the Agent creates the dogstatsd and + ## trace-agent sockets. This ensures no lost traces or dogstatsd metrics but + ## can cause the pod to wait if the agent has issues creating the sockets. + # + # type_socket_volumes: false + ## @param inject_tags - custom object - optional ## Tags injection parameters. 
# diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 85df73f4c7eed..fd4555ba702ba 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -716,6 +716,7 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("admission_controller.inject_config.socket_path", "/var/run/datadog") config.BindEnvAndSetDefault("admission_controller.inject_config.trace_agent_socket", "unix:///var/run/datadog/apm.socket") config.BindEnvAndSetDefault("admission_controller.inject_config.dogstatsd_socket", "unix:///var/run/datadog/dsd.socket") + config.BindEnvAndSetDefault("admission_controller.inject_config.type_socket_volumes", false) config.BindEnvAndSetDefault("admission_controller.inject_tags.enabled", true) config.BindEnvAndSetDefault("admission_controller.inject_tags.endpoint", "/injecttags") config.BindEnvAndSetDefault("admission_controller.inject_tags.pod_owners_cache_validity", 10) // in minutes diff --git a/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml new file mode 100644 index 0000000000000..807f941327e69 --- /dev/null +++ b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml @@ -0,0 +1,17 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Added a new option for the Cluster Agent + ("admission_controller.inject_config.type_socket_volumes") to specify that + injected volumes should be of type "Socket". This option is disabled by + default. When set to true, injected pods will not start until the Agent + creates the DogstatsD and trace-agent sockets. This ensures no traces or + DogstatsD metrics are lost, but it can cause the pod to wait if the Agent + has issues creating the sockets. From 6b23fbe47c508ec9ecf826d18120210262b208c7 Mon Sep 17 00:00:00 2001 From: Kangyi LI Date: Fri, 6 Sep 2024 11:26:22 +0200 Subject: [PATCH 041/128] add service tag to container (#28660) --- cmd/process-agent/command/main_common.go | 5 ++++- .../tagger/taggerimpl/collectors/workloadmeta_extract.go | 4 ++++ comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go | 2 ++ comp/core/tagger/tags/tags.go | 2 ++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go index 73daed01b341b..d9d9c208b8d59 100644 --- a/cmd/process-agent/command/main_common.go +++ b/cmd/process-agent/command/main_common.go @@ -174,7 +174,10 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error { // Provide the corresponding tagger Params to configure the tagger fx.Provide(func(c config.Component) tagger.Params { - if c.GetBool("process_config.remote_tagger") { + if c.GetBool("process_config.remote_tagger") || + // If the agent is running in ECS or ECS Fargate and the ECS task collection is enabled, use the remote tagger + // as remote tagger can return more tags than the local tagger. 
+ ((env.IsECS() || env.IsECSFargate()) && c.GetBool("ecs_task_collection_enabled")) { return tagger.NewNodeRemoteTaggerParams() } return tagger.NewTaggerParams() diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go index d7753c7f495f2..9dfa487f002f4 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go @@ -462,6 +462,10 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta addResourceTags(taskTags, task.Tags) } + if task.ServiceName != "" { + taskTags.AddLow(tags.EcsServiceName, strings.ToLower(task.ServiceName)) + } + tagInfos := make([]*types.TagInfo, 0, len(task.Containers)) for _, taskContainer := range task.Containers { container, err := c.store.GetContainer(taskContainer.ID) diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go index 63c2a66adc4af..c2ab0bb1714d8 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go @@ -1426,6 +1426,7 @@ func TestHandleECSTask(t *testing.T) { Name: containerName, }, }, + ServiceName: "datadog-agent-service", }, expected: []*types.TagInfo{ { @@ -1444,6 +1445,7 @@ func TestHandleECSTask(t *testing.T) { "task_family:datadog-agent", "task_name:datadog-agent", "task_version:1", + "ecs_service:datadog-agent-service", }, StandardTags: []string{}, }, diff --git a/comp/core/tagger/tags/tags.go b/comp/core/tagger/tags/tags.go index 2307447fbe789..fa24321003010 100644 --- a/comp/core/tagger/tags/tags.go +++ b/comp/core/tagger/tags/tags.go @@ -108,6 +108,8 @@ const ( EcsContainerName = "ecs_container_name" // EcsClusterName is the tag for the ECS cluster name EcsClusterName = "ecs_cluster_name" + // EcsServiceName is the tag for the ECS service name + EcsServiceName = "ecs_service" // Language is the tag for the process language Language = "language" From 87ec563c84eabe5788abc8da96c03f1fb529daae Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Fri, 6 Sep 2024 11:31:19 +0200 Subject: [PATCH 042/128] [CWS] Generate policy JSON schema (#28135) --- .../generators/schemas/policy/main.go | 70 +++ pkg/security/secl/go.mod | 4 + pkg/security/secl/go.sum | 10 + pkg/security/secl/rules/model.go | 98 ++--- pkg/security/secl/rules/policy_test.go | 188 ++++++++ pkg/security/tests/schemas/policy.schema.json | 407 ++++++++++++++++++ 6 files changed, 728 insertions(+), 49 deletions(-) create mode 100644 pkg/security/generators/schemas/policy/main.go create mode 100644 pkg/security/tests/schemas/policy.schema.json diff --git a/pkg/security/generators/schemas/policy/main.go b/pkg/security/generators/schemas/policy/main.go new file mode 100644 index 0000000000000..0dc6181eabead --- /dev/null +++ b/pkg/security/generators/schemas/policy/main.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:generate go run github.com/DataDog/datadog-agent/pkg/security/generators/schemas/policy -output ../../../tests/schemas/policy.schema.json + +// Package main holds main related files +package main + +import ( + "encoding/json" + "flag" + "os" + "reflect" + "time" + + "github.com/invopop/jsonschema" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +func main() { + var output string + flag.StringVar(&output, "output", "", "output file") + flag.Parse() + + if output == "" { + panic("an output file argument is required") + } + + reflector := jsonschema.Reflector{ + ExpandedStruct: true, + Mapper: func(t reflect.Type) *jsonschema.Schema { + switch t { + case reflect.TypeOf(time.Duration(0)): + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + { + Type: "string", + Format: "duration", + Description: "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)", + }, + { + Type: "integer", + Description: "Duration in nanoseconds", + }, + }, + } + } + return nil + }, + } + + if err := reflector.AddGoComments("github.com/DataDog/datadog-agent/pkg/security/secl/rules/model.go", "../../../secl/rules"); err != nil { + panic(err) + } + + schema := reflector.Reflect(&rules.PolicyDef{}) + schema.ID = "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules" + + data, err := json.MarshalIndent(schema, "", " ") + if err != nil { + panic(err) + } + + if err := os.WriteFile(output, data, 0644); err != nil { + panic(err) + } +} diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 22d7931b1557d..87b83d5e5f01c 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -16,12 +16,14 @@ require ( github.com/skydive-project/go-debouncer v1.0.0 github.com/spf13/cast v1.7.0 github.com/stretchr/testify v1.9.0 + github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/sys v0.24.0 golang.org/x/text v0.17.0 golang.org/x/tools v0.24.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 modernc.org/mathutil v1.6.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -35,6 +37,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/shopspring/decimal v1.4.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/sync v0.8.0 // indirect diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index d111f0dc627d3..4d9aec07ce4cb 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -18,6 +18,7 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -56,9 +57,16 @@ github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMs github.com/spf13/cast v1.7.0 
h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= @@ -83,3 +91,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/security/secl/rules/model.go b/pkg/security/secl/rules/model.go index d347ec32a895f..30783522b272c 100644 --- a/pkg/security/secl/rules/model.go +++ b/pkg/security/secl/rules/model.go @@ -41,18 +41,18 @@ const ( // OverrideOptions defines combine options type OverrideOptions struct { - Fields []OverrideField `yaml:"fields"` + Fields []OverrideField `yaml:"fields" json:"fields" jsonschema:"enum=all,enum=expression,enum=actions,enum=every,enum=tags"` } // MacroDefinition holds the definition of a macro type MacroDefinition struct { - ID MacroID `yaml:"id"` - Expression string `yaml:"expression"` - Description string `yaml:"description"` - AgentVersionConstraint string `yaml:"agent_version"` - Filters []string `yaml:"filters"` - Values []string `yaml:"values"` - Combine CombinePolicy `yaml:"combine"` + ID MacroID `yaml:"id" json:"id"` + Expression string `yaml:"expression" json:"expression,omitempty" jsonschema:"oneof_required=MacroWithExpression"` + Description string `yaml:"description" json:"description,omitempty"` + AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"` + Filters []string `yaml:"filters" json:"filters,omitempty"` + Values []string `yaml:"values" json:"values,omitempty" jsonschema:"oneof_required=MacroWithValues"` + Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=merge,enum=override"` } // RuleID represents the ID of a rule @@ -60,20 +60,20 @@ type RuleID = string // RuleDefinition holds the definition of a rule type RuleDefinition struct { - ID RuleID `yaml:"id"` - Version string 
`yaml:"version"` - Expression string `yaml:"expression"` - Description string `yaml:"description"` - Tags map[string]string `yaml:"tags"` - AgentVersionConstraint string `yaml:"agent_version"` - Filters []string `yaml:"filters"` - Disabled bool `yaml:"disabled"` - Combine CombinePolicy `yaml:"combine"` - OverrideOptions OverrideOptions `yaml:"override_options"` - Actions []*ActionDefinition `yaml:"actions"` - Every time.Duration `yaml:"every"` - Silent bool `yaml:"silent"` - GroupID string `yaml:"group_id"` + ID RuleID `yaml:"id" json:"id"` + Version string `yaml:"version" json:"version,omitempty"` + Expression string `yaml:"expression" json:"expression,omitempty"` + Description string `yaml:"description" json:"description,omitempty"` + Tags map[string]string `yaml:"tags" json:"tags,omitempty"` + AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"` + Filters []string `yaml:"filters" json:"filters,omitempty"` + Disabled bool `yaml:"disabled" json:"disabled,omitempty"` + Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=override"` + OverrideOptions OverrideOptions `yaml:"override_options" json:"override_options,omitempty"` + Actions []*ActionDefinition `yaml:"actions" json:"actions,omitempty"` + Every time.Duration `yaml:"every" json:"every,omitempty"` + Silent bool `yaml:"silent" json:"silent,omitempty"` + GroupID string `yaml:"group_id" json:"group_id,omitempty"` } // GetTag returns the tag value associated with a tag key @@ -95,11 +95,11 @@ const ( // ActionDefinition describes a rule action section type ActionDefinition struct { - Filter *string `yaml:"filter"` - Set *SetDefinition `yaml:"set"` - Kill *KillDefinition `yaml:"kill"` - CoreDump *CoreDumpDefinition `yaml:"coredump"` - Hash *HashDefinition `yaml:"hash"` + Filter *string `yaml:"filter" json:"filter,omitempty"` + Set *SetDefinition `yaml:"set" json:"set,omitempty" jsonschema:"oneof_required=SetAction"` + Kill *KillDefinition `yaml:"kill" json:"kill,omitempty" jsonschema:"oneof_required=KillAction"` + CoreDump *CoreDumpDefinition `yaml:"coredump" json:"coredump,omitempty" jsonschema:"oneof_required=CoreDumpAction"` + Hash *HashDefinition `yaml:"hash" json:"hash,omitempty" jsonschema:"oneof_required=HashAction"` } // Scope describes the scope variables @@ -107,27 +107,27 @@ type Scope string // SetDefinition describes the 'set' section of a rule action type SetDefinition struct { - Name string `yaml:"name"` - Value interface{} `yaml:"value"` - Field string `yaml:"field"` - Append bool `yaml:"append"` - Scope Scope `yaml:"scope"` - Size int `yaml:"size"` - TTL time.Duration `yaml:"ttl"` + Name string `yaml:"name" json:"name"` + Value interface{} `yaml:"value" json:"value,omitempty" jsonschema:"oneof_required=SetWithValue"` + Field string `yaml:"field" json:"field,omitempty" jsonschema:"oneof_required=SetWithField"` + Append bool `yaml:"append" json:"append,omitempty"` + Scope Scope `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"` + Size int `yaml:"size" json:"size,omitempty"` + TTL time.Duration `yaml:"ttl" json:"ttl,omitempty"` } // KillDefinition describes the 'kill' section of a rule action type KillDefinition struct { - Signal string `yaml:"signal"` - Scope string `yaml:"scope"` + Signal string `yaml:"signal" json:"signal" jsonschema:"description=A valid signal name,example=SIGKILL,example=SIGTERM"` + Scope string `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"` } // CoreDumpDefinition describes the 
'coredump' action type CoreDumpDefinition struct { - Process bool `yaml:"process"` - Mount bool `yaml:"mount"` - Dentry bool `yaml:"dentry"` - NoCompression bool `yaml:"no_compression"` + Process bool `yaml:"process" json:"process,omitempty" jsonschema:"anyof_required=CoreDumpWithProcess"` + Mount bool `yaml:"mount" json:"mount,omitempty" jsonschema:"anyof_required=CoreDumpWithMount"` + Dentry bool `yaml:"dentry" json:"dentry,omitempty" jsonschema:"anyof_required=CoreDumpWithDentry"` + NoCompression bool `yaml:"no_compression" json:"no_compression,omitempty"` } // HashDefinition describes the 'hash' section of a rule action @@ -135,21 +135,21 @@ type HashDefinition struct{} // OnDemandHookPoint represents a hook point definition type OnDemandHookPoint struct { - Name string `yaml:"name"` - IsSyscall bool `yaml:"syscall"` - Args []HookPointArg `yaml:"args"` + Name string `yaml:"name" json:"name"` + IsSyscall bool `yaml:"syscall" json:"syscall,omitempty"` + Args []HookPointArg `yaml:"args" json:"args,omitempty"` } // HookPointArg represents the definition of a hook point argument type HookPointArg struct { - N int `yaml:"n"` - Kind string `yaml:"kind"` + N int `yaml:"n" json:"n" jsonschema:"description=Zero-based argument index"` + Kind string `yaml:"kind" json:"kind" jsonschema:"enum=uint,enum=null-terminated-string"` } // PolicyDef represents a policy file definition type PolicyDef struct { - Version string `yaml:"version"` - Macros []*MacroDefinition `yaml:"macros"` - Rules []*RuleDefinition `yaml:"rules"` - OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks"` + Version string `yaml:"version" json:"version"` + Macros []*MacroDefinition `yaml:"macros" json:"macros,omitempty"` + Rules []*RuleDefinition `yaml:"rules" json:"rules"` + OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks" json:"hooks,omitempty"` } diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index 112001e8aeea3..286b0fc1036da 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -10,6 +10,7 @@ package rules import ( "fmt" + "net/http" "os" "path/filepath" "strings" @@ -18,11 +19,14 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/xeipuuv/gojsonschema" "github.com/Masterminds/semver/v3" "github.com/hashicorp/go-multierror" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" + yamlk8s "sigs.k8s.io/yaml" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -928,3 +932,187 @@ broken }) } } + +// go test -v github.com/DataDog/datadog-agent/pkg/security/secl/rules --run="TestPolicySchema" +func TestPolicySchema(t *testing.T) { + tests := []struct { + name string + policy string + schemaResultCb func(*testing.T, *gojsonschema.Result) + }{ + { + name: "valid", + policy: policyValid, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + if !assert.True(t, result.Valid(), "schema validation failed") { + for _, err := range result.Errors() { + t.Errorf("%s", err) + } + } + }, + }, + { + name: "missing required rule ID", + policy: policyWithMissingRequiredRuleID, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "id is required") + }, + }, + { + name: "unknown field", + policy: policyWithUnknownField, + schemaResultCb: func(t 
*testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Additional property unknown_field is not allowed") + }, + }, + { + name: "invalid field type", + policy: policyWithInvalidFieldType, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Invalid type") + + }, + }, + { + name: "multiple actions", + policy: policyWithMultipleActions, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Must validate one and only one schema") + }, + }, + } + + fs := os.DirFS("../../../../pkg/security/tests/schemas") + schemaLoader := gojsonschema.NewReferenceLoaderFileSystem("file:///policy.schema.json", http.FS(fs)) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + json, err := yamlk8s.YAMLToJSON([]byte(test.policy)) + require.NoErrorf(t, err, "failed to convert yaml to json: %v", err) + documentLoader := gojsonschema.NewBytesLoader(json) + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + require.NoErrorf(t, err, "failed to validate schema: %v", err) + test.schemaResultCb(t, result) + }) + } +} + +const policyValid = ` +version: 1.2.3 +rules: + - id: basic + expression: exec.file.name == "foo" + - id: with_tags + description: Rule with tags + expression: exec.file.name == "foo" + tags: + tagA: a + tagB: b + - id: disabled + description: Disabled rule + expression: exec.file.name == "foo" + disabled: true + - id: with_tags + description: Rule with combine + expression: exec.file.name == "bar" + combine: override + override_options: + fields: + - expression + - id: with_filters + description: Rule with a filter and agent_version field + expression: exec.file.name == "foo" + agent_version: ">= 7.38" + filters: + - os == "linux" + - id: with_every_silent_group_id + description: Rule with a silent/every/group_id field + expression: exec.file.name == "foo" + silent: true + every: 10s + group_id: "baz_group" + - id: with_set_action_with_field + description: Rule with a set action using an event field + expression: exec.file.name == "foo" + actions: + - set: + name: process_names + field: process.file.name + append: true + size: 10 + ttl: 10s + - id: with_set_action_with_value + description: Rule with a set action using a value + expression: exec.file.name == "foo" + actions: + - set: + name: global_var_set + value: true + - id: with_set_action_use + description: Rule using a variable set by a previous action + expression: open.file.path == "/tmp/bar" && ${global_var_set} + - id: with_kill_action + description: Rule with a kill action + expression: exec.file.name == "foo" + actions: + - kill: + signal: SIGKILL + scope: process + - id: with_coredump_action + description: Rule with a coredump action + expression: exec.file.name == "foo" + actions: + - coredump: + process: true + dentry: true + mount: true + no_compression: true + - id: with_hash_action + description: Rule with a hash action + expression: exec.file.name == "foo" + actions: + - hash: {} +` +const policyWithMissingRequiredRuleID = ` +version: 1.2.3 +rules: + - description: Rule with missing ID + expression: exec.file.name == "foo" 
+` + +const policyWithUnknownField = ` +version: 1.2.3 +rules: + - id: rule with unknown field + expression: exec.file.name == "foo" + unknown_field: "bar" +` + +const policyWithInvalidFieldType = ` +version: 1.2.3 +rules: + - id: 2 + expression: exec.file.name == "foo" +` + +const policyWithMultipleActions = ` +version: 1.2.3 +rules: + - id: rule with missing action + expression: exec.file.name == "foo" + actions: + - set: + name: global_var_set + value: true + kill: + signal: SIGKILL + scope: process +` diff --git a/pkg/security/tests/schemas/policy.schema.json b/pkg/security/tests/schemas/policy.schema.json new file mode 100644 index 0000000000000..bb300a7571a98 --- /dev/null +++ b/pkg/security/tests/schemas/policy.schema.json @@ -0,0 +1,407 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules", + "$defs": { + "ActionDefinition": { + "oneOf": [ + { + "required": [ + "set" + ], + "title": "SetAction" + }, + { + "required": [ + "kill" + ], + "title": "KillAction" + }, + { + "required": [ + "coredump" + ], + "title": "CoreDumpAction" + }, + { + "required": [ + "hash" + ], + "title": "HashAction" + } + ], + "properties": { + "filter": { + "type": "string" + }, + "set": { + "$ref": "#/$defs/SetDefinition" + }, + "kill": { + "$ref": "#/$defs/KillDefinition" + }, + "coredump": { + "$ref": "#/$defs/CoreDumpDefinition" + }, + "hash": { + "$ref": "#/$defs/HashDefinition" + } + }, + "additionalProperties": false, + "type": "object", + "description": "ActionDefinition describes a rule action section" + }, + "CoreDumpDefinition": { + "anyOf": [ + { + "required": [ + "process" + ], + "title": "CoreDumpWithProcess" + }, + { + "required": [ + "mount" + ], + "title": "CoreDumpWithMount" + }, + { + "required": [ + "dentry" + ], + "title": "CoreDumpWithDentry" + } + ], + "properties": { + "process": { + "type": "boolean" + }, + "mount": { + "type": "boolean" + }, + "dentry": { + "type": "boolean" + }, + "no_compression": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "description": "CoreDumpDefinition describes the 'coredump' action" + }, + "HashDefinition": { + "properties": {}, + "additionalProperties": false, + "type": "object", + "description": "HashDefinition describes the 'hash' section of a rule action" + }, + "HookPointArg": { + "properties": { + "n": { + "type": "integer", + "description": "Zero-based argument index" + }, + "kind": { + "type": "string", + "enum": [ + "uint", + "null-terminated-string" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "n", + "kind" + ], + "description": "HookPointArg represents the definition of a hook point argument" + }, + "KillDefinition": { + "properties": { + "signal": { + "type": "string", + "description": "A valid signal name", + "examples": [ + "SIGKILL", + "SIGTERM" + ] + }, + "scope": { + "type": "string", + "enum": [ + "process", + "container" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "signal" + ], + "description": "KillDefinition describes the 'kill' section of a rule action" + }, + "MacroDefinition": { + "oneOf": [ + { + "required": [ + "expression" + ], + "title": "MacroWithExpression" + }, + { + "required": [ + "values" + ], + "title": "MacroWithValues" + } + ], + "properties": { + "id": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "description": { + "type": "string" + }, + "agent_version": { + "type": "string" + 
}, + "filters": { + "items": { + "type": "string" + }, + "type": "array" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + }, + "combine": { + "type": "string", + "enum": [ + "merge", + "override" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "id" + ], + "description": "MacroDefinition holds the definition of a macro" + }, + "OnDemandHookPoint": { + "properties": { + "name": { + "type": "string" + }, + "syscall": { + "type": "boolean" + }, + "args": { + "items": { + "$ref": "#/$defs/HookPointArg" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ], + "description": "OnDemandHookPoint represents a hook point definition" + }, + "OverrideOptions": { + "properties": { + "fields": { + "items": { + "type": "string", + "enum": [ + "all", + "expression", + "actions", + "every", + "tags" + ] + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "fields" + ], + "description": "OverrideOptions defines combine options" + }, + "RuleDefinition": { + "properties": { + "id": { + "type": "string" + }, + "version": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "agent_version": { + "type": "string" + }, + "filters": { + "items": { + "type": "string" + }, + "type": "array" + }, + "disabled": { + "type": "boolean" + }, + "combine": { + "type": "string", + "enum": [ + "override" + ] + }, + "override_options": { + "$ref": "#/$defs/OverrideOptions" + }, + "actions": { + "items": { + "$ref": "#/$defs/ActionDefinition" + }, + "type": "array" + }, + "every": { + "oneOf": [ + { + "type": "string", + "format": "duration", + "description": "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)" + }, + { + "type": "integer", + "description": "Duration in nanoseconds" + } + ] + }, + "silent": { + "type": "boolean" + }, + "group_id": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "id" + ], + "description": "RuleDefinition holds the definition of a rule" + }, + "SetDefinition": { + "oneOf": [ + { + "required": [ + "value" + ], + "title": "SetWithValue" + }, + { + "required": [ + "field" + ], + "title": "SetWithField" + } + ], + "properties": { + "name": { + "type": "string" + }, + "value": true, + "field": { + "type": "string" + }, + "append": { + "type": "boolean" + }, + "scope": { + "type": "string", + "enum": [ + "process", + "container" + ] + }, + "size": { + "type": "integer" + }, + "ttl": { + "oneOf": [ + { + "type": "string", + "format": "duration", + "description": "Duration in Go format (e.g. 
1h30m, see https://pkg.go.dev/time#ParseDuration)" + }, + { + "type": "integer", + "description": "Duration in nanoseconds" + } + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ], + "description": "SetDefinition describes the 'set' section of a rule action" + } + }, + "properties": { + "version": { + "type": "string" + }, + "macros": { + "items": { + "$ref": "#/$defs/MacroDefinition" + }, + "type": "array" + }, + "rules": { + "items": { + "$ref": "#/$defs/RuleDefinition" + }, + "type": "array" + }, + "hooks": { + "items": { + "$ref": "#/$defs/OnDemandHookPoint" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "version", + "rules" + ], + "description": "PolicyDef represents a policy file definition" +} \ No newline at end of file From 60db53447d9a23d2fc955db1c1f57790a2a27664 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Fri, 6 Sep 2024 11:40:40 +0200 Subject: [PATCH 043/128] [CWS] bump security agent policies to v0.58.0 (#29098) --- release.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release.json b/release.json index 6d80d1d5fccc1..43d540aa6b09e 100644 --- a/release.json +++ b/release.json @@ -49,7 +49,7 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", - "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "6.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.7.0", @@ -64,7 +64,7 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", - "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "7.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.7.0", From 956ce5b07bbb18c889517ce39d093d94c4ac2407 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Fri, 6 Sep 2024 11:42:21 +0200 Subject: [PATCH 044/128] [EBPF-557] Force KMT CI to run when ebpf jobs are modified (#29040) --- .gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c50880e298007..a87ad8e0c6eaa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -698,6 +698,7 @@ workflow: - .gitlab/functional_test/security_agent.yml - .gitlab/kernel_matrix_testing/security_agent.yml - .gitlab/kernel_matrix_testing/common.yml + - .gitlab/source_test/ebpf.yml - test/new-e2e/system-probe/**/* - test/new-e2e/scenarios/system-probe/**/* - test/new-e2e/pkg/runner/**/* @@ -741,6 +742,7 @@ workflow: - pkg/util/kernel/**/* - .gitlab/kernel_matrix_testing/system_probe.yml - .gitlab/kernel_matrix_testing/common.yml + - .gitlab/source_test/ebpf.yml - test/new-e2e/system-probe/**/* - test/new-e2e/scenarios/system-probe/**/* - test/new-e2e/pkg/runner/**/* From d76790c3e5d80f917dba7b25bd976d9cb1003875 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 6 Sep 2024 12:27:51 +0200 Subject: [PATCH 045/128] feat(ci): add a slack channel for apm reliability and performance (#29100) --- tasks/libs/pipeline/github_slack_map.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index 42f19b76c9bb4..5e8ce75acd72d 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ 
b/tasks/libs/pipeline/github_slack_map.yaml @@ -41,3 +41,4 @@ '@datadog/agent-devx-infra': '#agent-devx-ops' '@datadog/agent-devx-loops': '#agent-devx-ops' '@datadog/apm-onboarding': '#apm-onboarding' +'@datadog/apm-reliability-and-performance': '#apm-ecosystems-reliability-and-performance' From b81443ab66114c34685e946a2c2f94362fc4860f Mon Sep 17 00:00:00 2001 From: Adel Haj Hassan <41540817+adel121@users.noreply.github.com> Date: Fri, 6 Sep 2024 12:57:57 +0200 Subject: [PATCH 046/128] [CONTP-393] add filters to tagger (#28988) --- .../generic_store/composite_store.go | 14 ++- .../taggerimpl/generic_store/default_store.go | 28 ++++-- .../generic_store/store_bench_test.go | 41 ++++++-- .../taggerimpl/generic_store/store_test.go | 24 +++-- .../core/tagger/taggerimpl/remote/tagstore.go | 8 +- .../tagger/taggerimpl/tagstore/tagstore.go | 8 +- comp/core/tagger/types/entity_id.go | 14 +++ comp/core/tagger/types/filter_builder.go | 82 +++++++++++++++ comp/core/tagger/types/filter_builder_test.go | 99 +++++++++++++++++++ comp/core/tagger/types/filters.go | 60 +++++++++++ comp/core/tagger/types/filters_test.go | 45 +++++++++ comp/core/tagger/types/types.go | 8 +- 12 files changed, 394 insertions(+), 37 deletions(-) create mode 100644 comp/core/tagger/types/filter_builder.go create mode 100644 comp/core/tagger/types/filter_builder_test.go create mode 100644 comp/core/tagger/types/filters.go create mode 100644 comp/core/tagger/types/filters_test.go diff --git a/comp/core/tagger/taggerimpl/generic_store/composite_store.go b/comp/core/tagger/taggerimpl/generic_store/composite_store.go index 13215d6fea371..1a1b7306265c7 100644 --- a/comp/core/tagger/taggerimpl/generic_store/composite_store.go +++ b/comp/core/tagger/taggerimpl/generic_store/composite_store.go @@ -5,7 +5,9 @@ package genericstore -import "github.com/DataDog/datadog-agent/comp/core/tagger/types" +import ( + "github.com/DataDog/datadog-agent/comp/core/tagger/types" +) type compositeObjectStore[T any] struct { data map[types.EntityIDPrefix]map[string]T @@ -64,10 +66,11 @@ func (os *compositeObjectStore[T]) Size() int { } // ListObjects implements ObjectStore#ListObjects -func (os *compositeObjectStore[T]) ListObjects() []T { +func (os *compositeObjectStore[T]) ListObjects(filter *types.Filter) []T { objects := make([]T, 0, os.Size()) - for _, idToObjects := range os.data { + for prefix := range filter.GetPrefixes() { + idToObjects := os.data[prefix] for _, object := range idToObjects { objects = append(objects, object) } @@ -77,8 +80,9 @@ func (os *compositeObjectStore[T]) ListObjects() []T { } // ForEach implements ObjectStore#ForEach -func (os *compositeObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { - for prefix, idToObjects := range os.data { +func (os *compositeObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) { + for prefix := range filter.GetPrefixes() { + idToObjects := os.data[prefix] for id, object := range idToObjects { apply(types.NewEntityID(prefix, id), object) } diff --git a/comp/core/tagger/taggerimpl/generic_store/default_store.go b/comp/core/tagger/taggerimpl/generic_store/default_store.go index d0112b8c0c8eb..66e383de8a1c2 100644 --- a/comp/core/tagger/taggerimpl/generic_store/default_store.go +++ b/comp/core/tagger/taggerimpl/generic_store/default_store.go @@ -35,19 +35,35 @@ func (os defaulObjectStore[T]) Size() int { } // ListObjects implements ObjectStore#ListObjects -func (os defaulObjectStore[T]) ListObjects() []T { +func (os defaulObjectStore[T]) ListObjects(filter *types.Filter) []T { 
objects := make([]T, 0) - for _, object := range os { - objects = append(objects, object) + if filter == nil { + for _, object := range os { + objects = append(objects, object) + } + } else { + for entityID, object := range os { + if filter.MatchesPrefix(entityID.GetPrefix()) { + objects = append(objects, object) + } + } } return objects } // ForEach implements ObjectStore#ForEach -func (os defaulObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { - for id, object := range os { - apply(id, object) +func (os defaulObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) { + if filter == nil { + for id, object := range os { + apply(id, object) + } + } else { + for id, object := range os { + if filter.MatchesPrefix(id.GetPrefix()) { + apply(id, object) + } + } } } diff --git a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go index 13715c69de459..0a07b142ee1b3 100644 --- a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go +++ b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go @@ -14,7 +14,7 @@ import ( configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) -const samples int = 1000000 +const samples int = 10000000 var weightedPrefixes = map[string]int{ "container_image_metadata": 60, @@ -24,7 +24,7 @@ var weightedPrefixes = map[string]int{ "deployment": 15, "kubernetes_metadata": 30, "kubernetes_pod_uid": 30, - "process": 30, + "process": 60, } // getWeightedPrefix selects a prefix based on the provided weights. @@ -55,6 +55,19 @@ func initStore(store types.ObjectStore[int]) { } } +func initFilter() *types.Filter { + fb := types.NewFilterBuilder() + + numberOfPrefixes := rand.Intn(len(weightedPrefixes)) + + for range numberOfPrefixes { + prefix := getNextPrefix() + fb.Include(prefix) + } + + return fb.Build(types.HighCardinality) +} + // Mock ApplyFunc for testing purposes func mockApplyFunc[T any](_ types.EntityID, _ T) {} @@ -166,7 +179,10 @@ func BenchmarkDefaultObjectStore_ForEach(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - store.ForEach(mockApplyFunc[int]) + b.StopTimer() + filter := initFilter() + b.StartTimer() + store.ForEach(filter, mockApplyFunc[int]) } } @@ -178,11 +194,14 @@ func BenchmarkCompositeObjectStore_ForEach(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - store.ForEach(mockApplyFunc[int]) + b.StopTimer() + filter := initFilter() + b.StartTimer() + store.ForEach(filter, mockApplyFunc[int]) } } -func BenchmarkDefaultObjectStore_ListAll(b *testing.B) { +func BenchmarkDefaultObjectStore_ListObjects(b *testing.B) { cfg := configmock.New(b) cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) store := NewObjectStore[int](cfg) @@ -191,11 +210,14 @@ func BenchmarkDefaultObjectStore_ListAll(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = store.ListObjects() + b.StopTimer() + filter := initFilter() + b.StartTimer() + _ = store.ListObjects(filter) } } -func BenchmarkCompositeObjectStore_ListAll(b *testing.B) { +func BenchmarkCompositeObjectStore_ListObjects(b *testing.B) { cfg := configmock.New(b) cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) store := NewObjectStore[int](cfg) @@ -204,6 +226,9 @@ func BenchmarkCompositeObjectStore_ListAll(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = store.ListObjects() + b.StopTimer() + filter := initFilter() + b.StartTimer() + _ = store.ListObjects(filter) } } diff --git 
a/comp/core/tagger/taggerimpl/generic_store/store_test.go b/comp/core/tagger/taggerimpl/generic_store/store_test.go index 0a72a87fecb2a..48a3331d181ff 100644 --- a/comp/core/tagger/taggerimpl/generic_store/store_test.go +++ b/comp/core/tagger/taggerimpl/generic_store/store_test.go @@ -114,20 +114,26 @@ func TestObjectStore_ListObjects(t *testing.T) { cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) store := NewObjectStore[any](cfg) + // build some filter + fb := types.NewFilterBuilder() + fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2")) + filter := fb.Build(types.HighCardinality) + // list should return empty - list := store.ListObjects() + list := store.ListObjects(filter) assert.Equalf(t, len(list), 0, "ListObjects should return an empty list") // add some items ids := []string{"prefix1://id1", "prefix2://id2", "prefix3://id3", "prefix4://id4"} for _, id := range ids { entityID, _ := types.NewEntityIDFromString(id) - store.Set(entityID, struct{}{}) + store.Set(entityID, id) } // list should return empty - list = store.ListObjects() - assert.Equalf(t, len(list), len(ids), "ListObjects should return a list of size %d", len(ids)) + list = store.ListObjects(filter) + expectedListing := []any{"prefix1://id1", "prefix2://id2"} + assert.ElementsMatch(t, expectedListing, list) } // default store @@ -152,10 +158,16 @@ func TestObjectStore_ForEach(t *testing.T) { } accumulator := []string{} - store.ForEach(func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) }) + + // build some filter + fb := types.NewFilterBuilder() + fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2")) + filter := fb.Build(types.HighCardinality) + + store.ForEach(filter, func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) }) // list should return empty - assert.ElementsMatch(t, accumulator, ids) + assert.ElementsMatch(t, accumulator, []string{"prefix1://id1", "prefix2://id2"}) } // default store diff --git a/comp/core/tagger/taggerimpl/remote/tagstore.go b/comp/core/tagger/taggerimpl/remote/tagstore.go index 5f3d12de8eac4..3ac87a5151d66 100644 --- a/comp/core/tagger/taggerimpl/remote/tagstore.go +++ b/comp/core/tagger/taggerimpl/remote/tagstore.go @@ -81,7 +81,7 @@ func (s *tagStore) getEntity(entityID types.EntityID) *types.Entity { func (s *tagStore) listEntities() []*types.Entity { s.mutex.RLock() defer s.mutex.RUnlock() - return s.store.ListObjects() + return s.store.ListObjects(types.NewMatchAllFilter()) } func (s *tagStore) collectTelemetry() { @@ -93,7 +93,7 @@ func (s *tagStore) collectTelemetry() { s.mutex.Lock() defer s.mutex.Unlock() - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ }) + s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ }) for prefix, storedEntities := range s.telemetry { s.telemetryStore.StoredEntities.Set(storedEntities, remoteSource, prefix) @@ -107,7 +107,7 @@ func (s *tagStore) subscribe(cardinality types.TagCardinality) chan []types.Enti events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { + s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, Entity: *e, @@ -138,7 +138,7 @@ func (s *tagStore) reset() { events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { 
+ s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeDeleted, Entity: types.Entity{ID: e.ID}, diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore.go b/comp/core/tagger/taggerimpl/tagstore/tagstore.go index ddd07cb17252b..020128c06b2df 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore.go @@ -163,7 +163,7 @@ func (s *TagStore) collectTelemetry() { s.Lock() defer s.Unlock() - s.store.ForEach(func(_ types.EntityID, et EntityTags) { + s.store.ForEach(nil, func(_ types.EntityID, et EntityTags) { prefix := string(et.getEntityID().GetPrefix()) for _, source := range et.sources() { @@ -192,7 +192,7 @@ func (s *TagStore) Subscribe(cardinality types.TagCardinality) chan []types.Enti events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, et EntityTags) { + s.store.ForEach(nil, func(_ types.EntityID, et EntityTags) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, Entity: et.toEntity(), @@ -220,7 +220,7 @@ func (s *TagStore) Prune() { now := s.clock.Now() events := []types.EntityEvent{} - s.store.ForEach(func(eid types.EntityID, et EntityTags) { + s.store.ForEach(nil, func(eid types.EntityID, et EntityTags) { changed := et.deleteExpired(now) if !changed && !et.shouldRemove() { @@ -283,7 +283,7 @@ func (s *TagStore) List() types.TaggerListResponse { s.RLock() defer s.RUnlock() - for _, et := range s.store.ListObjects() { + for _, et := range s.store.ListObjects(types.NewMatchAllFilter()) { r.Entities[et.getEntityID().String()] = types.TaggerListEntity{ Tags: et.tagsBySource(), } diff --git a/comp/core/tagger/types/entity_id.go b/comp/core/tagger/types/entity_id.go index 62e44eb75f2f9..e049d2825c799 100644 --- a/comp/core/tagger/types/entity_id.go +++ b/comp/core/tagger/types/entity_id.go @@ -129,3 +129,17 @@ const ( // Process is the prefix `process` Process EntityIDPrefix = "process" ) + +// AllPrefixesSet returns a set of all possible entity id prefixes that can be used in the tagger +func AllPrefixesSet() map[EntityIDPrefix]struct{} { + return map[EntityIDPrefix]struct{}{ + ContainerID: {}, + ContainerImageMetadata: {}, + ECSTask: {}, + Host: {}, + KubernetesDeployment: {}, + KubernetesMetadata: {}, + KubernetesPodUID: {}, + Process: {}, + } +} diff --git a/comp/core/tagger/types/filter_builder.go b/comp/core/tagger/types/filter_builder.go new file mode 100644 index 0000000000000..233b495d168a5 --- /dev/null +++ b/comp/core/tagger/types/filter_builder.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package types + +import ( + "maps" +) + +// FilterBuilder builds a tagger subscriber filter based on include/exclude rules +type FilterBuilder struct { + prefixesToInclude map[EntityIDPrefix]struct{} + + prefixesToExclude map[EntityIDPrefix]struct{} +} + +// NewFilterBuilder returns a new empty filter builder +func NewFilterBuilder() *FilterBuilder { + return &FilterBuilder{ + prefixesToInclude: make(map[EntityIDPrefix]struct{}), + prefixesToExclude: make(map[EntityIDPrefix]struct{}), + } +} + +// Include includes the specified prefixes in the filter +func (fb *FilterBuilder) Include(prefixes ...EntityIDPrefix) *FilterBuilder { + if fb == nil { + panic("filter builder should not be nil") + } + + for _, prefix := range prefixes { + fb.prefixesToInclude[prefix] = struct{}{} + } + + return fb +} + +// Exclude excludes the specified prefixes from the filter +func (fb *FilterBuilder) Exclude(prefixes ...EntityIDPrefix) *FilterBuilder { + if fb == nil { + panic("filter builder should not be nil") + } + + for _, prefix := range prefixes { + fb.prefixesToExclude[prefix] = struct{}{} + } + + return fb +} + +// Build builds a new Filter object based on the calls to Include and Exclude +// If the builder only excludes prefixes, the created filter will match any prefix except for the excluded ones. +// If the builder only includes prefixes, the created filter will match only the prefixes included in the builder. +// If the builder includes prefixes and excludes prefixes, the created filter will match only prefixes that are included but a not excluded in the builder +// If the builder has neither included nor excluded prefixes, it will match all prefixes by default +func (fb *FilterBuilder) Build(card TagCardinality) *Filter { + if fb == nil { + panic("filter builder should not be nil") + } + + if len(fb.prefixesToInclude)+len(fb.prefixesToExclude) == 0 { + return newFilter(AllPrefixesSet(), card) + } + + var prefixSet map[EntityIDPrefix]struct{} + + // initialise prefixSet with what should be included + if len(fb.prefixesToInclude) == 0 { + prefixSet = maps.Clone(AllPrefixesSet()) + } else { + prefixSet = maps.Clone(fb.prefixesToInclude) + } + + // exclude unwanted prefixes + for prefix := range fb.prefixesToExclude { + delete(prefixSet, prefix) + } + + return newFilter(prefixSet, card) +} diff --git a/comp/core/tagger/types/filter_builder_test.go b/comp/core/tagger/types/filter_builder_test.go new file mode 100644 index 0000000000000..73d5424103fae --- /dev/null +++ b/comp/core/tagger/types/filter_builder_test.go @@ -0,0 +1,99 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package types + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFilterBuilderOps(t *testing.T) { + tests := []struct { + name string + do func(*FilterBuilder) + buildCard TagCardinality + expectBuildFilter Filter + }{ + { + name: "do nothing", + do: func(_ *FilterBuilder) {}, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: AllPrefixesSet(), + cardinality: HighCardinality, + }, + }, + { + name: "only includes", + do: func(fb *FilterBuilder) { + fb.Include(KubernetesDeployment, ContainerID) + fb.Include(Host) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + ContainerID: {}, + Host: {}, + }, + cardinality: HighCardinality, + }, + }, + { + name: "only excludes", + do: func(fb *FilterBuilder) { + fb.Exclude(KubernetesDeployment, ContainerID) + fb.Exclude(Host) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + ContainerImageMetadata: {}, + ECSTask: {}, + KubernetesMetadata: {}, + KubernetesPodUID: {}, + Process: {}, + }, + cardinality: HighCardinality, + }, + }, + { + name: "both includes and excludes", + do: func(fb *FilterBuilder) { + fb.Include(ContainerImageMetadata) + fb.Exclude(KubernetesDeployment, ContainerID) + fb.Include(ContainerID) + fb.Exclude(Host, KubernetesMetadata) + fb.Include(Host, Process) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + ContainerImageMetadata: {}, + Process: {}, + }, + cardinality: HighCardinality, + }, + }, + } + + for _, test := range tests { + fb := NewFilterBuilder() + test.do(fb) + filter := fb.Build(test.buildCard) + assert.Truef(t, reflect.DeepEqual(*filter, test.expectBuildFilter), "expected %v, found %v", test.expectBuildFilter, filter) + } +} + +func TestNilFilterBuilderOps(t *testing.T) { + var fb *FilterBuilder + + assert.Panics(t, func() { fb.Include(ContainerID) }) + assert.Panics(t, func() { fb.Exclude(ContainerID) }) + assert.Panics(t, func() { fb.Build(HighCardinality) }) +} diff --git a/comp/core/tagger/types/filters.go b/comp/core/tagger/types/filters.go new file mode 100644 index 0000000000000..436b334e3bff2 --- /dev/null +++ b/comp/core/tagger/types/filters.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package types + +import ( + "maps" +) + +// NewMatchAllFilter returns a filter that matches any prefix +func NewMatchAllFilter() *Filter { + return nil +} + +// Filter represents a subscription filter for the tagger +type Filter struct { + prefixes map[EntityIDPrefix]struct{} + cardinality TagCardinality +} + +func newFilter(prefixes map[EntityIDPrefix]struct{}, cardinality TagCardinality) *Filter { + return &Filter{ + prefixes: maps.Clone(prefixes), + cardinality: cardinality, + } +} + +// GetPrefixes returns the prefix set of the filter +// If the filter is nil, a set containing all possible prefixes is returned +func (f *Filter) GetPrefixes() map[EntityIDPrefix]struct{} { + if f == nil { + return AllPrefixesSet() + } + + return maps.Clone(f.prefixes) +} + +// GetCardinality returns the filter cardinality +// If the filter is nil, High cardinality is returned +func (f *Filter) GetCardinality() TagCardinality { + if f == nil { + return HighCardinality + } + + return f.cardinality +} + +// MatchesPrefix returns whether or not the filter matches the prefix passed as argument +func (f *Filter) MatchesPrefix(prefix EntityIDPrefix) bool { + // A nil filter should match everything + if f == nil { + return true + } + + _, found := f.prefixes[prefix] + + return found +} diff --git a/comp/core/tagger/types/filters_test.go b/comp/core/tagger/types/filters_test.go new file mode 100644 index 0000000000000..97827d218a069 --- /dev/null +++ b/comp/core/tagger/types/filters_test.go @@ -0,0 +1,45 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package types + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFilterOps(t *testing.T) { + f := Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + KubernetesPodUID: {}, + }, + cardinality: OrchestratorCardinality, + } + + // assert cardinality is correct + cardinality := f.GetCardinality() + assert.Equal(t, OrchestratorCardinality, cardinality) + + // assert GetPrefixes + expectedPrefixes := map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + KubernetesPodUID: {}, + } + assert.Truef(t, reflect.DeepEqual(expectedPrefixes, f.GetPrefixes()), "expected %v, found %v", expectedPrefixes, f.GetPrefixes()) +} + +func TestNilFilter(t *testing.T) { + var f *Filter + + assert.Truef(t, reflect.DeepEqual(f.GetPrefixes(), AllPrefixesSet()), "expected %v, found %v", AllPrefixesSet(), f.GetPrefixes()) + assert.Equalf(t, HighCardinality, f.GetCardinality(), "nil filter should have cardinality HIGH, found %v", f.GetCardinality()) + + for prefix := range AllPrefixesSet() { + assert.Truef(t, f.MatchesPrefix(prefix), "nil filter should match any prefix, didn't match %v", prefix) + } +} diff --git a/comp/core/tagger/types/types.go b/comp/core/tagger/types/types.go index e9e16653cf9aa..f39cdf375b71f 100644 --- a/comp/core/tagger/types/types.go +++ b/comp/core/tagger/types/types.go @@ -28,10 +28,10 @@ type ObjectStore[V any] interface { Unset(EntityID) // Size returns the total number of objects in the store Size() int - // ListObjects returns a slice containing all objects of the store - ListObjects() []V - // ForEach applies a given function to each object in the store - ForEach(ApplyFunc[V]) + // ListObjects returns a slice containing objects of the store matching the filter + ListObjects(*Filter) []V + 
// ForEach applies a given function to each object in the store matching the filter + ForEach(*Filter, ApplyFunc[V]) } // TaggerListResponse holds the tagger list response From 0c64c3475d44a350fd1b1cfea402cff9892de248 Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Fri, 6 Sep 2024 13:47:27 +0200 Subject: [PATCH 047/128] [service discovery] read inject process metadata (#29046) Co-authored-by: Guy Arbitman --- .../servicediscovery/module/envs.go | 130 ++++++++++++------ .../servicediscovery/module/envs_test.go | 24 ++-- .../module/injected_process.go | 18 +++ .../module/injected_process_gen.go | 115 ++++++++++++++++ .../module/injected_process_gen_test.go | 67 +++++++++ tasks/libs/types/copyright.py | 3 +- 6 files changed, 297 insertions(+), 60 deletions(-) create mode 100644 pkg/collector/corechecks/servicediscovery/module/injected_process.go create mode 100644 pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go create mode 100644 pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go diff --git a/pkg/collector/corechecks/servicediscovery/module/envs.go b/pkg/collector/corechecks/servicediscovery/module/envs.go index 08f5c54994565..5ae6ebf797e96 100644 --- a/pkg/collector/corechecks/servicediscovery/module/envs.go +++ b/pkg/collector/corechecks/servicediscovery/module/envs.go @@ -15,24 +15,40 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/shirou/gopsutil/v3/process" ) const ( - // injectorMemFdName is the name the injector (Datadog/auto_inject) uses. - injectorMemFdName = "dd_environ" - injectorMemFdPath = "/memfd:" + injectorMemFdName + " (deleted)" + injectorMemFdName = "dd_process_inject_info.msgpack" + injectorMemFdPath = "/memfd:" + injectorMemFdName // memFdMaxSize is used to limit the amount of data we read from the memfd. // This is for safety to limit our memory usage in the case of a corrupt // file. - memFdMaxSize = 4096 + // matches limit in the [auto injector](https://github.com/DataDog/auto_inject/blob/5ae819d01d8625c24dcf45b8fef32a7d94927d13/librouter.c#L52) + memFdMaxSize = 65536 ) -// readEnvsFile reads the env file created by the auto injector. The file -// contains the variables in a format similar to /proc/$PID/environ: ENV=VAL, -// separated by \000. -func readEnvsFile(path string) ([]string, error) { +// getInjectionMeta gets metadata from auto injector injection, if +// present. The auto injector creates a memfd file where it writes +// injection metadata such as injected environment variables, or versions +// of the auto injector and the library. 
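+// The memfd payload is msgpack-encoded (see the InjectedProcess type); a read
+// that fills the entire memFdMaxSize buffer is treated as truncated and discarded.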
+func getInjectionMeta(proc *process.Process) (*InjectedProcess, bool) { + path, found := findInjectorFile(proc) + if !found { + return nil, false + } + injectionMeta, err := extractInjectionMeta(path) + if err != nil { + log.Warnf("failed extracting injected envs: %s", err) + return nil, false + } + return injectionMeta, true + +} + +func extractInjectionMeta(path string) (*InjectedProcess, error) { reader, err := os.Open(path) if err != nil { return nil, err @@ -43,13 +59,19 @@ func readEnvsFile(path string) ([]string, error) { if err != nil { return nil, err } + if len(data) == memFdMaxSize { + return nil, io.ErrShortBuffer + } - return strings.Split(string(data), "\000"), nil + var injectedProc InjectedProcess + if _, err = injectedProc.UnmarshalMsg(data); err != nil { + return nil, err + } + return &injectedProc, nil } -// getInjectedEnvs gets environment variables injected by the auto injector, if -// present. The auto injector creates a memfd file with a specific name into which -// it writes the environment variables. In order to find the correct file, we +// findInjectorFile searches for the injector file in the process open file descriptors. +// In order to find the correct file, we // need to iterate the list of files (named after file descriptor numbers) in // /proc/$PID/fd and get the name from the target of the symbolic link. // @@ -59,57 +81,75 @@ func readEnvsFile(path string) ([]string, error) { // lrwx------ 1 foo foo 64 Aug 13 14:24 0 -> /dev/pts/6 // lrwx------ 1 foo foo 64 Aug 13 14:24 1 -> /dev/pts/6 // lrwx------ 1 foo foo 64 Aug 13 14:24 2 -> /dev/pts/6 -// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/memfd:dd_environ (deleted)' +// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/dd_process_inject_info.msgpac (deleted)' // ``` -func getInjectedEnvs(proc *process.Process) []string { +func findInjectorFile(proc *process.Process) (string, bool) { fdsPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "fd") - entries, err := os.ReadDir(fdsPath) + // quick path, the shadow file is the first opened file by the process + // unless there are inherited fds + path := filepath.Join(fdsPath, "3") + if isInjectorFile(path) { + return path, true + } + fdDir, err := os.Open(fdsPath) if err != nil { - return nil + log.Warnf("failed to open %s: %s", fdsPath, err) + return "", false } - - for _, entry := range entries { - path := filepath.Join(fdsPath, entry.Name()) - name, err := os.Readlink(path) - if err != nil { - continue - } - - if name != injectorMemFdPath { + defer fdDir.Close() + fds, err := fdDir.Readdirnames(-1) + if err != nil { + log.Warnf("failed to read %s: %s", fdsPath, err) + return "", false + } + for _, fd := range fds { + switch fd { + case "0", "1", "2", "3": continue + default: + path := filepath.Join(fdsPath, fd) + if isInjectorFile(path) { + return path, true + } } - - envs, _ := readEnvsFile(path) - return envs } + return "", false +} - return nil +func isInjectorFile(path string) bool { + name, err := os.Readlink(path) + if err != nil { + return false + } + return strings.HasPrefix(name, injectorMemFdPath) } -// envsToMap splits a list of strings containing environment variables of the +// addEnvToMap splits a list of strings containing environment variables of the // format NAME=VAL to a map. 
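+// Each call handles a single NAME=VAL entry; callers iterate over the list and
+// pass in the destination map, which is updated in place. Entries without an
+// "=" separator are skipped.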
-func envsToMap(envs ...string) map[string]string { - envMap := make(map[string]string, len(envs)) - for _, env := range envs { - name, val, found := strings.Cut(env, "=") - if !found { - continue - } - - envMap[name] = val +func addEnvToMap(env string, envs map[string]string) { + name, val, found := strings.Cut(env, "=") + if found { + envs[name] = val } - - return envMap } // getEnvs gets the environment variables for the process, both the initial // ones, and if present, the ones injected via the auto injector. func getEnvs(proc *process.Process) (map[string]string, error) { - envs, err := proc.Environ() + procEnvs, err := proc.Environ() if err != nil { return nil, err } - - envs = append(envs, getInjectedEnvs(proc)...) - return envsToMap(envs...), nil + envs := make(map[string]string, len(procEnvs)) + for _, env := range procEnvs { + addEnvToMap(env, envs) + } + injectionMeta, ok := getInjectionMeta(proc) + if !ok { + return envs, nil + } + for _, env := range injectionMeta.InjectedEnv { + addEnvToMap(string(env), envs) + } + return envs, nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/envs_test.go b/pkg/collector/corechecks/servicediscovery/module/envs_test.go index 7ef168b963f67..9a56755c097b6 100644 --- a/pkg/collector/corechecks/servicediscovery/module/envs_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/envs_test.go @@ -8,7 +8,6 @@ package module import ( - "bytes" "fmt" "os" "strings" @@ -23,8 +22,9 @@ func TestInjectedEnvBasic(t *testing.T) { curPid := os.Getpid() proc, err := process.NewProcess(int32(curPid)) require.NoError(t, err) - envs := getInjectedEnvs(proc) - require.Nil(t, envs) + injectionMeta, ok := getInjectionMeta(proc) + require.Nil(t, injectionMeta) + require.False(t, ok) // Provide an injected replacement for some already-present env variable first := os.Environ()[0] @@ -49,12 +49,10 @@ func TestInjectedEnvLimit(t *testing.T) { full := []string{env} createEnvsMemfd(t, full) - expected := []string{full[0][:memFdMaxSize]} - proc, err := process.NewProcess(int32(os.Getpid())) require.NoError(t, err) - envs := getInjectedEnvs(proc) - require.Equal(t, expected, envs) + _, ok := getInjectionMeta(proc) + require.False(t, ok) } // createEnvsMemfd creates an memfd in the current process with the specified @@ -62,16 +60,14 @@ func TestInjectedEnvLimit(t *testing.T) { func createEnvsMemfd(t *testing.T, envs []string) { t.Helper() - var b bytes.Buffer + var injectionMeta InjectedProcess for _, env := range envs { - _, err := b.WriteString(env) - require.NoError(t, err) - - err = b.WriteByte(0) - require.NoError(t, err) + injectionMeta.InjectedEnv = append(injectionMeta.InjectedEnv, []byte(env)) } + encodedInjectionMeta, err := injectionMeta.MarshalMsg(nil) + require.NoError(t, err) - memfd, err := memfile(injectorMemFdName, b.Bytes()) + memfd, err := memfile(injectorMemFdName, encodedInjectionMeta) require.NoError(t, err) t.Cleanup(func() { unix.Close(memfd) }) } diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process.go b/pkg/collector/corechecks/servicediscovery/module/injected_process.go new file mode 100644 index 0000000000000..5c99b0775e5ee --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process.go @@ -0,0 +1,18 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:generate go run github.com/tinylib/msgp -io=false + +package module + +// InjectedProcess represents the data injected by the auto-injector into the +// process. +type InjectedProcess struct { + LocalHostname string `msgp:"local_hostname"` + InjectedEnv [][]byte `msgp:"injected_envs"` + LanguageName string `msgp:"language_name"` + TracerVersion string `msgp:"tracer_version"` + InjectorVersion string `msgp:"injector_version"` +} diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go new file mode 100644 index 0000000000000..6ecd5ef86d4c4 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go @@ -0,0 +1,115 @@ +package module + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *InjectedProcess) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "LocalHostname" + o = append(o, 0x85, 0xad, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LocalHostname) + // string "InjectedEnv" + o = append(o, 0xab, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x76) + o = msgp.AppendArrayHeader(o, uint32(len(z.InjectedEnv))) + for za0001 := range z.InjectedEnv { + o = msgp.AppendBytes(o, z.InjectedEnv[za0001]) + } + // string "LanguageName" + o = append(o, 0xac, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LanguageName) + // string "TracerVersion" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "InjectorVersion" + o = append(o, 0xaf, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.InjectorVersion) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *InjectedProcess) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "LocalHostname": + z.LocalHostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LocalHostname") + return + } + case "InjectedEnv": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "InjectedEnv") + return + } + if cap(z.InjectedEnv) >= int(zb0002) { + z.InjectedEnv = (z.InjectedEnv)[:zb0002] + } else { + z.InjectedEnv = make([][]byte, zb0002) + } + for za0001 := range z.InjectedEnv { + z.InjectedEnv[za0001], bts, err = msgp.ReadBytesBytes(bts, z.InjectedEnv[za0001]) + if err != nil { + err = msgp.WrapError(err, "InjectedEnv", za0001) + return + } + } + case "LanguageName": + z.LanguageName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageName") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "InjectorVersion": + z.InjectorVersion, bts, err = 
msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "InjectorVersion") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InjectedProcess) Msgsize() (s int) { + s = 1 + 14 + msgp.StringPrefixSize + len(z.LocalHostname) + 12 + msgp.ArrayHeaderSize + for za0001 := range z.InjectedEnv { + s += msgp.BytesPrefixSize + len(z.InjectedEnv[za0001]) + } + s += 13 + msgp.StringPrefixSize + len(z.LanguageName) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 16 + msgp.StringPrefixSize + len(z.InjectorVersion) + return +} diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go new file mode 100644 index 0000000000000..dbbe388c42a8b --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go @@ -0,0 +1,67 @@ +package module + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalInjectedProcess(t *testing.T) { + v := InjectedProcess{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgInjectedProcess(b *testing.B) { + v := InjectedProcess{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgInjectedProcess(b *testing.B) { + v := InjectedProcess{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalInjectedProcess(b *testing.B) { + v := InjectedProcess{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/tasks/libs/types/copyright.py b/tasks/libs/types/copyright.py index a19773744f811..301db649f32b0 100755 --- a/tasks/libs/types/copyright.py +++ b/tasks/libs/types/copyright.py @@ -66,6 +66,7 @@ '^// This file is licensed under the MIT License.', '^// Copyright \\(C\\) 2017 ScyllaDB', '^// Copyright \\(c\\) Tailscale Inc & AUTHORS', + '^// Code generated by github.com/tinylib/msgp DO NOT EDIT.', ] @@ -144,7 +145,7 @@ def _is_excluded_header(header, exclude=None): exclude = [] for matcher in exclude: - if re.search(matcher, header[0]): + if re.search(matcher, header[0]) or re.search(matcher, header[2]): return True return False From 07fe5d3e9d3397dd5e66106f36e138dd537b403a Mon Sep 17 00:00:00 2001 From: Mackenzie <63265430+mackjmr@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:11:41 +0200 Subject: [PATCH 048/128] Document datadog extension (#28629) Co-authored-by: Pablo Baeyens --- comp/otelcol/ddflareextension/README.md | 64 ++++++++++++++++++ .../image/ddflareextensiondiagram.png | Bin 0 -> 300010 bytes 2 files changed, 64 insertions(+) create mode 100644 
comp/otelcol/ddflareextension/README.md create mode 100644 comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png diff --git a/comp/otelcol/ddflareextension/README.md b/comp/otelcol/ddflareextension/README.md new file mode 100644 index 0000000000000..79a9677e4207e --- /dev/null +++ b/comp/otelcol/ddflareextension/README.md @@ -0,0 +1,64 @@ +# ddflare Extension + +The ddflare extension component allows inclusion of otel-agent data in the datadog-agent [flare](https://docs.datadoghq.com/agent/troubleshooting/send_a_flare/?tab=agent). A flare can be triggered by the core agent process, or remote-config. + +The ddflare extension also provides the relevant metadata for otel-agent configuration and inventory tracking in Fleet Automation. This metadata is periodically collected by the core-agent which then submits that data to the backend. + + + +## Extension Configuration + +The datadogextension will be added automatically by the [converter component](../converter/README.md). If you opted out of the converter, or you want to change the defaults, you are able to configure the extension as so: + +*Collector config:* +``` +extensions: + datadog: + port: 7777 +``` + +*Agent Config:* +``` +otel-agent: + enabled: true + flare_port: 7777 +``` + +The port is the location in which the otel-agent will expose the data required to build the flare. The core agent then fetches the data from this port. + +## Data collected by flare + +### Configurations + +The flare will collect both the provided collector config and the enhanced config (enhanced via [converter](../converter/README.md)). + +The provided collector configs can be found in `otel/otel-flare/customer.cfg` and the enhanced config can be found in `otel/otel-flare/customer.cfg`. + +### Environment variables + +The flare will collect all environment variables, and these can be found in `otel/otel-flare/environment.json`. + +### Extension data + +The flare also adds data collected from extensions. These extensions are added automatically by the [converter component](../converter/README.md). The data collected is from extensions: +- health_check: Found in `otel/otel-flare/health_check`. + +Will contain a JSON of the latest health check, for example: + +``` +{"status":"Server available","upSince":"2024-08-14T14:54:00.575804+02:00","uptime":"28.470434291s"} +``` +- pprof: Found in `otel/otel-flare/pprof` +- zpages: Found in `otel/otel-flare/zpages` + +### Logs + +The flare will collect the otel-agent logs which can be found in `logs/otel-agent.log`. + +### Raw data + +The raw response can be found in `otel-response.json`. This corresponds to the data that is exposed at the datadog extensions port. + +## Data collected for inventory + +The ddflare extension submits a variety of metadata for fleet automation including version, command, configuration. You can find more information about the Inventory Agent Payload in [comp/metadata/inventoryotel/README.md](../../metadata/inventoryotel/README.md). 
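As a rough, illustrative sketch only (not the agent's implementation): the payload described in the "Raw data" section is served over plain HTTP on the extension's configured port, and the core agent fetches it when building a flare. The snippet below assumes the default port 7777 from the examples above and a root path, which the README does not specify.

```
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Port 7777 matches the flare_port / extension examples above; the root
	// path is an assumption, as the README only states that data is exposed
	// on this port.
	resp, err := http.Get("http://localhost:7777/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// This payload corresponds to what ends up as otel-response.json in a flare.
	fmt.Println(string(body))
}
```

In practice this fetch is performed internally by the core agent; the sketch only makes the port/payload relationship concrete.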
diff --git a/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png b/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..02894b083267b23cfdd175e84bed7b4c884df521
GIT binary patch
literal 300010

[base85-encoded binary payload of ddflareextensiondiagram.png (the ddflare extension diagram referenced by the README above) omitted]
z#J+}FGt7-*+H04)Qr}pUQSO3 zgC2zMyElwII_`R0liWjm!q+Pk&nxj@uc3a^XME~&M z=pD=QvN8ubs?nR5aL}K;0A-&>gbFiEgsy#KHT_rdHO~F6L}-Dh`pXIgC@Du_XYubf zBm|RHeOnj9J&|B<{l!;+sk&SzNT;3<8+k`+L`I+$SLLvt$r$)_l7pP;QDhqX1wkdX zWR1N-p9x9$OdC$Bar5=1hdFnY>-J&3^HKE`u|EFwb@L{rnA@7l>Y|0>cI6hs7bK5; zaNRGdbRN4j%akBRr5EMJ93NjyA(9_)a{*MgB*Uc9vUL?k(2{G)b&>8+-uF~h<>?@t4viV}hO@IYob9d6+1$De8Jc zXHBH04D1PdT$39rSE=OwRMR|&&${v0TG?;pttfMtA(t%ao!)JcNqx>1Y*T35p7Hjn zJJNhwb9y{q8#r0UX&f=L^3=|39<3f~I>9WLhjbdViF&MPwT5f^->G8X+1D+o+_y9= zz2naEEk|Pnsbf9$4HLTD=PS3u3G-ocv}!Pq@2QRlSY;Z|>1=fHGY;OvO$ziHr?G z_x_Z1++oI>qtGX0d8iq9Ci%NXo&`-P3%sBq&eiB^si@j3T#amaM~^!(;a&jrzzP%b zX8(JpHQK@K-Pxw%hSW32#P|eP*i@OLs`7XL+5X{@bVE`jOflk0Puz)Zul15uF3XB~ zF*uITJJ4p0Hb~~fd=rUf);{-K=2OY2-{9V3X79{n&ar)c-F{s-m#tsxaR%OyYua&k zMT1}I$sXDl|1@p!FEBG#!`U137>rTzYN_NJb=4A>RyTzri)4;BF}!-a>cr?&2@=Dv zzb{?ypMPvch~Uc|^beqjq*1lX(hyR5F2EDSmNOKt>jjvqX)VF~9l*Ktd&?9<%COdZwty8%es_FmpvG!iu8Ml36~Z3`Ej>ZRMW-I z-?!oi70N;EhMpKHUnBwX+kVx3-K@ElZR|j)oPa*%C}!yHA=;EsJ;wZlE4@Yo$fv-Y z(+oD*1>Gv9L^ihdPm+Jv3_7)`SWpFM%UuF;{VUHTg?UmG;&S7X72P8@k|R(u=zW77 zEs1lFA2=GYM>jT4GSSM;=&brtWMjeV!K}pC33ReGHtDF$0?`t}0T1M12-2#JUG9 zB2U3(#b>?Rx6$rFdH#R^O>wxulZ~xqBVGgi8*qo>^B6}}l3BTL{9sDQ;hFk}6qH9A zT|PYU;nKalH(7U1uqPz$^SECI zYKT%?^gX8IaRXT?Ri{YTnfnXqj?G?uS3OB1lrs2GYyl|-!mWoH$5np{Y%tXFAJkg* z2iADmsJDYnvWo84W{zLXm8SJoLbbvL^1}y2-1ojA=Sx!Q%a0`sou~r=Rf^fV@X7WH zbk>VO-SJ{rhM$fOtRLzGsy2oCscnE#qd`!3*dGABT*0#%vhmgQ0V8E9yZf60et?<$ zp3XRvvYCLUNURCS2*<11#1^4~lkO#HQHxJUIi&i&4P8gjd%X_dV>Fh}U3I-%sx&Ly zukJo~(?CYqcj2)4SIs#3P~papf-nB5GUGjIum|uM0?+Uu*TM5K2r&H5T@6=BN^D?Q zr-3slJDE248U`##lU+|39CE&KDT1VFC)M8@^WjxpU|}N9+>Fmo+UuPuzfsfMV4oKsT+PYG|qN-TQS=ML|U* zNYx zUcz;a(pXXfEcgcMPjh9EN{T_pa9Au4C#S~gdu*AmfN9}a<&QuegP}W1q#~cA^YRM( zm5k9H=j%+&5;B9l_?y5&{?ouWe|Ek)V*;gpZ}sNc2<~^g`UsT-*PFu6VSo82ysff9k>^D3{ut6xg17l%X+`wfyJ@cXavvuNd4#p-nO?BwxhcNksKr@lBkwSkk%e` z%`jzK+7kM3ABuwbd1_e;oa&u~c9GP3uCE$B8zZ{+PBN{uIAEd`NRATXcw*tvg68gm zbN={MaDAEi(w%xPUs2V)+Y8uuN(bm4B)VWl)P#kMm2nL5IwWaAr*U^|;&D>y>th&u zi@M`s4qw%j>&NaE%_xOPs)~#?VRMJDX#0eSGs|^Lls-2bRAr(y0 zdM`eTyAt(+0@k0*hHNUYsE2FuKUR;eGI^t_?q`*nKHs$ym?0z?-y)L{1^7>uA`nBB zzTe<-K`-?0LwF`8;<|qH;Y->szY*!V(_w&DE2WK?r|Lg>X{z@V+gD5<+rmK^_WsvI z3eCP!zNptK`^Ud!2W*{KZ15%{MBauGnL7$kMEb+ofmoMz<&SAV9*$}5 z=drAuzvd5L*^r{T^=uhIifQfrllNcqtrSvmmwd5ZgLFUcQ2*jP(vfF$!4zxd+Zrle zlQM;?)((A(Ka%wnTd#e#+Sc(kH7~Rfu;Z8gd?aCe_5peabmSo$(0N@=R|36r1P#6M z_7NZ^ypDfnuFbbm|B02Mf#uI-8I0UF;%(wNwYWf-@s{4?ceY4u@Ao6}Yuvl@s-d+P zHWX7E`Q5#3iwqq>ZvrU&^|gRH~PA5?KMNKi0;^g22(Xqg>x-J{{a3+Y;S_F zs^~*os7t4wK8l_7N_A!cQDZr$;dr3ve?D!XbYA+uaWkg;yA2+}ceWPf7P z_6t?!0Yhlq`7xRIY}NK;X*fw784hJ0G5{QoC7P?C_)sb!Wb(wUUJ1ZdmA-7Yf{yN7 ztl4aq#9+7ia2BzEi{?KJ_i#U?&t?>goolcT4j`nF-o3_<8c1y;I9-dcxa;QTVMftc z$@{T*^*PRAsany2@NXd&s3s=fke_=tt%f&=B!(HalNw?5m&wIgu(o%ScMr?>X7Wam z(H^aH9!3u(&3)`Tp~ZlD7CHZv`n&3O(19-^f$YAY-v?X$=G_f`ySX>*I5Lac2ZJ)M zA!E~EGjs1h96=I96_IHmf{z|Gw_dov^VE$@Ad^}oY|?><89qPk&KJ}j8ArO&0aMUd zX(v|c=9$bc9|R?}8#mHU^?HQqTa)r3n`XjZuvMc6uKDss4iQzrXkALv6I(HMPPP5H z87GczTds~cJAW0E$o&CUX^G*#u%0+4_wXPhCyJ@r?}y?k<)H5=zg5LT4pqA1tXJr9 z(#O*Ulva*8#~zjb@doPdG=; zNO1QZd6d)qd~Ta3{tBDUQ2o{zF~b|;WxY)f3IK%w%UphI4nk%hKAu145*0pW<^9DT z!QZg)k%~x3rG~008e|$IKQIHIdj9h3>^b+y_W&7}@_dvSccPf4Tl`n%#O zyuZ40WKI^w{@oJeg`!71r*>_9$bB-XOZy632Nm<$?91$pgxdC_=&bYGYIwQ3&Y4^M z`oZkXvSUPst$sKMp`q|5J#i(pAVz^H^@If zaQ=vFKms*K>OYayx(U+W9^s}nr-Y!C?erJRiWH3L4d|cF9cRz)XG=mw>Q+XjZVs(P8Waqgd6t#{{>FPLrTX7!X$a zz-YuS@AYid%ilRkHP562)WmK)JxYjsfc}FuXMJ4qsO`P#{{OtM?A6EuPhZOIX=M#e zbLPp2_2|j(GT$m>k-EAAgI6gq{|uHC&Iq?0_-{*A?MtQd)p;VWQDC1o9&ieGWyJ-6 
z`ISegmdKY2xw%)eT|Pa#C1`hW3S|^66$!?ns_%GdeLwkYfcn{(+m{Nb2wYdj;cDgfeTaW+x{rRPHX*ex9b`vli zUtW;^arpGBrFCMTg)U%)Eu2igZ$rAI2-R;Xm+4MmgJ%gr0!yH{(|?V#oI_~!<$*lE zF~oxL-=EDivf*(U`JY23EJ)<`Zy6Sh{xzlvdz_l-k3pZzek++5JllYGFI^_x4Sn7e z$l0D6IKgJ}8A&mhPd!8Z#{*TK#x6rDoCGjJ5yCA2;GnZm3Nd5~PJS7MvcNWP?~qVX zbO^pp3BXulZMGXtXeR&}Kfk&({Wahhpg!-J*L(DXb>$S|eFpT1@-+%+dJqKhJ?2K2 z^y5>;nacIg_ODW*2Y;c8;|Na zrp#L0WAaG&e~d0iFyww+0F&^nbjRr8Il6~D)c$+piWt9*g)T2ZEcOBj-3gQJyjpK} z5vYkSfLihbHO)P_yA#5JiRU9<1?p8!J%t$v?n-Q2Fgp7e96=hb^w#-3C;$9c)!;R0 zY=VoRHE^3%>w+~^HrTEuDgOAmkZ$`A#18fh(sOs^JTomC#H?R;12XW(oi~PlKnZpR zC(zHptxR-Tgrdj|q0p6ij)HaYb;}e0nz*hq8_D5<83pmdC1JtBa{q-gY;Yl8btVDP z*#4Aj)%WaVmMs9rt67`*n+1iA{A<5}C##Il2gnpsSbW)d50z%WjKZJuj@jjxC(b{) z?{yd9m1oGVrERXi6LrO1SlI*(#1;~-{-%l*ZzCv#DnCkVX%cR08#ZmU=$5=Ui~25 zVfeV>yfK2$c>!ac?_&uUput}RKqGsl6NB6c76In^*V9c^hG6UtkY9gT_~QI2vF*@E zfx1-JGctr&8E&2hTEf@j{?~K{c_&Cg1t+C${W>zVgAm>)j{yv1RkF)K<%Xasb(Y5hG=3{@ZI+VwJx;`0v3J1fN*t2N4c%M*)6ZfRIsAb@ylFA8RcSf^3ksCR`t6YS#VP9 zJR(Jnh$|J)piPR+4hwIQGyZa*?T#XLBt0-Rce`=)Pw7-~he1S@c|Xn)q*M{*sB9pm z1g}Pz4aV*{+=Q4;1Ftc=T)O&tyd|gI=HbVTcfrN&)OOq<7&)UX+$vrSO$S})VpdN- zgpuHoaQmv@opl_1y82*4JU4&FbdRsm-JM6?#T#0ASjwd%+ukC|o#o3W4g} z2L~OoV%C#3R>16r*mD(5xf+`v11R!D7Hz(HzFC+8M@uBnhGm;{t#gx%d-{1vZRyQ7fZS-xIbbb{HFoS1-M$XND8^+7zEi;LDF(No zZm=lzXA&22;5>ZM4l%d6Jl@&ZHx?3Al;Q=z?EIsfEA6hqU=JqW2S1WFnNx!)xJ3vb zEIl`2B(Mz*^5Cy-dkta`QeWNOtB+wBFEl?tQaD>@X*f69z9eu1fy9c(+vPu*5IK-- z5&kd6H;|#0lkZEOi$JLCf0f@)f-#R~AH>~BgfPrO_mMb zQ}g-}sx{*c1`TN%n+g^Y~~alin( zasLt!8#|+T>ZPmIM8}gGh`1!gH^I#r^Wz=D>b_Vg!gx9_8*INS(3A>B;i~VG%gYw^ zU_KZ3ITu~8>p{hz*cK++Cx@!>@Tv|Xcjb#_=mkK9+2)^(=T2R@CK);`EHeu!Fuvx)Xwd|qukNmX9f|SG?4M>1~U9P zNP?M{+oym0Wz?pr7R z9);jZJfuA;MCr)w&kKMUvpE3Qc`S`Vao#8`u01y488acy8otO(Q~!N3+i&GK+APEx zqypYhbBOIO-mCcNnYfEow0#@g0viN24%#|~CCy^xW>Bwx6bx)4ZI!GeDDt`g50{T|?jC-AmL$MRo&bek*z)IqYHVTI``w+3AP ztDJt{Z<+SO?9>Jh)xGuBBv+PwmvKZC2j zzD+q58%Vy=z}^UyG(Qalf;I4hx`;?C!Eb8vKV@J^DQtHH?(eeZm;A85JiWf@3(tZNykkt?SWfQ3PPW@qS|_se^0#qF*05A zq!SlUlu4=OGHrd8)#uUJb4ULypDUlpEuRc$vnJlALbPH4USBU0@a6nH7wPCc2KX@< zhRPG~bkYjic5V;+1&aed*CT%bu9tXCopu7Z!nGIYZpu+^haae#W)Kmg7>TpxMx>yXweRHc(&&kj>0?Gor{EcJgIR5vUj+G&IixPzakun zY9~@iB1sAHB@%zqB0wd)4Q@$fBR9#DP+6^GGDPHKVDN8!P{bajhwc1V=L^7FK|4h> zujzxY1~A@e|M$Dm5L9*ugku2m3nFY6fZF5)=ULd}$`OgHG%etngwTN3z`cdeiCf+O zf1fSixku`+%U$Qw<0U{iJkuERT_l$t$vM(@-XooqF%mi%uLo?UIp~RRkP-SIMmQos zJ}5cc{(!F~Vu5BC9M4S!XI96@ zgJK7l4Kfco!%JEQ7hU%PX~|Tjotwg(M&^HLy|s{o{m*$s7T$#jMcou+sshB66L@iF z)|vLh_Yus<2L-dbgfMRF^)*!+|EyP2l~Je4X~n$YAH8)bUO< z_y$7De>Ga772?|Y|0L1;PbJT`1PPB7XF_cl6ApL!>p z{CCBW;IN9dxz&Vpgdk5g*9H7M{~JN@Qo2_cr<{Nw{a%TD?1WWQB*yueGHU9)?EFr% zgLL%!(Co~Az_Vt0Vt1Ms_*`B?B)~MZb>8j#4(0^1sTrgJL)^+iTcZc6qEXHuzBcPC zV;Lr89^X|npZ(b==-DeA>u-;Xs5p=}i+E1wbdi}q+Kt@0o zWvd=KRHR)yH$Fpi5JIG<`(gyAYd%PkcY}I{9U{@zDhJi8Ph?TXHS7J+%8}5+TG;Np z6}KI`1<7*+d>SOCSS(x@`=Mh}@DZ3m1{d=ou3hwJpqP zuNg%15%R)U7^&JrPgMxRCNo6f3+|rRc-9lpVff+;|^ZHh~l^5ah*S>}ko5g9uPzp@?=zmk3C($no<8C0fA zK}GXAB%8v$QS`531T?$ldF5Zc`dSfT(=(?V7yhSUC`9L{Siy$8oK-c&3VfUMY%W5> z3Yn^X_tAA?+2MWWBRcwMo%>L18*An;ZM}14FQ#q)7(k67C*M3i;(4fiLf(9=!h@Rl2W~#jvn+b-S=sPxnDrf+8!Y;+0}WaLPbz)=RHGKqevZKb%O#rKunH zI{s##xPaT|gbE~`w=TY^X?~(>W#_zL(6q5g8iuIrf8r^C`kaj71QxPwca9=-$|eU8 z{~C^5b2Sxn79t@IPM zZq>yTvFhp7NF~R9i}0Uvld>ygwe2XNv|k`%v>=#k4;OHT{A@Rum`v8w&aK04w2HnE z>V6LFiN%-C+aSs;H;0X%fj$XhIErK?V-`C84Qy|$KD6HVZ{KNG5)?>uIMT2Fo^WRPnTxugW%?0%elIkxevAGnk|9Rq0 zDeOcr(n8IuSZLWq{qKD4SWs&Xu&B->a_5NHQa9c1AtXHin`oxn(d2WCf*m!S^n(&% z-N;OI@r(BvL>>>Z$b%E#m!Ozs41kq=-VlSED|RJrtKRNpNaQcUmO_8aRWK4&gyB8N z+0vbPUORFDDB+WCfou2bdMvAyPyWe)03w~8Z5qCffPo>+F=J=D8e@5LlO+1NkFvCv 
zwKlSL>xSlj+)}{|9zp-q{Hqh)HV7Had*Ijd-3%)Py)hMaAeZQ##3F)(&>kfAdVS8; z0gcG5;t7eSMbHYbw$(xuEe7jl?ZE>FR|Px?>%GsdD|%eiq+?)HK zRD&_2GV+Z`fAfR9Rw4R!X$@LNq|2)0GlNmsmkOgM^?8sHQGUzxUhpi(nQ-C10|&UL ziM-@gKmg6jhx7Oz5ki%Rv4fxKS#zJnGxwPnTRMk8>cj8$g``u^w6r7QBrqts_1@Yn zIqDyp6psqWD|0Hqiknl(uT&pa214cL^@Xlqjv#U^&V$v#h~}04UDlAShf&IzNWGGE z_=(?E5um&WYcs};ngaaRImC(A!n)8m9Nl~9X$tCm@DV?RhYS)&Nbz^Zb_K3A>&yj> zt(O7sSop8#8jSNC5nzV)@m@Pi&J_Q}9zIu_>}LTtq8pr1u_C=APkWM%i+S*))e8B2LCz-sTeZX==c5Uma!>@&*&1C~9D!*K0ZRWOS?Av^&xnm}wjjbM-gvshEW^i^;OW3`7DRGo-_VJ|Xo%RYq&b!CITCE{TXVN=#E z?L|mNx;hM*3U5Hln9M$LOKUqgD5Iru%TO`+^CuuPN~lJ2VB(6{fb(*>Mm-MPiXqBq zo$zX441+exacS7nT2uv8}uYB+_w$T%d>KN!gqzQIxqZyw%Sp&vyxK1p`9d1lmk#1q7{MQBI}(nTId7DD$x6 z$GYxB_5Xt5CCob1frxN?4`N{NRidsZO4Q6kO&$Dx1H6a3E z=IyK%S1vvNdpwkPp#}msE07Bhn>0I!3Wryz%h)fxV}ro|cSSUcOq%GH1KvHaK~5SX zvJ~CBzWn|PqR8HQ=ioa~FSr0l`zFK6dit0*uE7XG_QB%eM3?R4PK~0I?soSWJ=X(ugd1PN?4uZx=7)KPtT#_=CkV``im!{|V4dL*uOOr?&EP;Ur(lZB$x?-I$ zhJIl=n?NtL0sM53*>gnX+6La!-bRB)YG6F&>16!$u=h@igQ=-4fi(^E#`0mvo%kaw z>b`U?Rk!TTB{;I>v>~=atARA{WuBQ8JghR#py8EfIJba1VBert$Z~MTPJ{k)Uav>wo0Wpz*m{8x;MY@kCL?X~>o>x(F!_fED}R>#Ogm=<4y2Xf|6Ze@djMo7@YzRes`B#`f4sN+2k$ z%yQwdX+k=O-|yPfRA&m7Z`jM(JyW(^@7D)P9IKe+&uQ##*ek3g>r?6t{7WH;d&qnD zqD)dtC$&eBV*Z|z|0@Jn`uTwx^%BsGJsqh_!Jvsv+HaY74|X;McZB-8jgZhVO$cf1 z@hM<$wzD2&^X6kdEI~RYlR)*pQ ztcrW7eUTLpB(Bf-uUx*o0!inXr|$ND#qA5Dm9bdJ6xWKsHtZtjpgrAB_kMBMdf!Mz zu~Qg}cv`?}R(Wvsjib{K2BDRZMI`Gi=B+`RX!=dkYa2=30N)S1BsN!F`8*wPxk-11 zU@B_y^~k;dV)Ns@`A`t-Va-Qma^i2*SUjs{Y(zXCQvDar;rZ^ny)6XyH|FLuF3&v2 zwuPn}ZYX2r9Gi{x#eUpAX1*TVT?SjRmpTGz#ZUNtW%vi=*xxa#e^CRvcXolBYXe`N zlTR(d&6;*)fQWh?aU1<@U~`JTR?HtoQ=DO?2A{48iN&+$@(%8F_9Phlu=@z~?X@e? z+FT+(Q$+U@&hKb|!`i}x1NTGwyY>a@2h9WK|A7vaU70ZV!uZ|~-&C@q2pI*v=zif_ z3WemR<}}3;!+s-Y;Vdn%99+Ip1vbEcyLT8uc9U@Y3T_nCyXUXJ#X2CSATAZ?-b)qF z62)=*l!E8|FS1rS>+D2JWwcZXS+hvN&$Dq6)u-dg*-@gOWk8hWw-od{oj{^}60R;xLvqGCiK{-@tcl18xX_51|3%@s`PF+;#`;zcfviFLmumpz%OmOsb@!ecGZUdEK zAFaVZ&8tISri^k5rcb|Y#ZaOvEl&3O+9QTYW=-S`?wt71ByM=5=)c!DLPt&&g#=1P z*wHS6M9y+hL3+3Yg`{8GY=W$X(ZwjCzv;l}BRH*uvW!EhPAh?*`}m8-4BZ2m;@S)P z)nejV3vPMI2dI{SaC7BxL{)jH3X#>Et|cijVK0CCc5Qva3dS4qn1&E(`SIf!I|~tk zpigwo3+%+lMFV9X;fcc|lRsJ#j!CZ{fu-H`FZaxO_E4Rr`!Pt{WtVi2&pK*kVO!rg z@d&wk-QX}Dz8htH+$`{8E3KJC16HmFv?5vi&&961Grw4$7k{9ANK z2v(E*fEJ?&O0ZYrhy9+hILsvIN}OGSO=CAmt2x3BdMV>Nsh>|mA_dkpMfP*~5O>Qk zJON$h+hJLn9IcS_oxfKyI9YG+-rbePG?++CR3oZ(-{s@QRHaq~Y!oeEV6MMn zubN2z8pJG^?m>s}vIgLfhtRz+_w5mV**`oTo*=1Z@^$&wg${&kn}WKAXT0V^1Y@cP zU!cx^E6lO}F=5~%q?lLb&1@pF#9S%d^u2&dr2aJ0jg?lFy@T-bx1nTxFL-!G#5M#D@?bh0M7OTMm61#T}S6- zd2eRbX;_ByueAQ^k7VN=<%0180*jbaI? 
zR1iQ?=ZMt2GQwm^BT>snEL`Sus{=!siK)u9>Aph+ZDm-ip5jTs%q(8n?MO_88or0g zP~j{HLcByYGT{0geYrgKDf_l3o%0D#HV1!I@T<#jr&R$cZf`w{mkF>3-pyirE&=N z4;p=|d?ctBWZBu;u+WK1r`aVOCb>g;d~R49u}J;l7YL>17quN}4ljeEQklT$Vc9*C zu!*C?&6}?ROdtB1`@vr|!)mcDAx%Jv~|zW059|Fv9q zxt3QQXU;t5obTTIvppNZQR~a=yAmIM@i5L;?YG}iOWVv(YTzad0mo)YQH4nDla>r= zeb6ygn-|P8oF{zJ|9wa{pTa4^xv^3EVOXLClTeKXuTJ(w(&yIf42iaq5jWhyz2o<& zUD6v!Odz-DhLy}Pef;-gsX|QF08M=@;ZiZNxLhVW5wojF4)dU;3^HkWypHwnJj+aW ziKK*WmeY1(d+Cd+nATN%AJ^)jAO)hfG)__(oDScqEsTEL9q$D=6i!CIs#t9;2N@*t z7U&T0zPF;liFk^x{sH8n!Uy<4OTV#pZAZ*IyMq}1H2_t20z%GrM{Ui4(msfZc7!HG z*Tp1vl?rwy%O4_b+OC9@mf@rA=6D}?tkqB$l`TGNn8n|2`B@0sQ45RhJOY$V)aY|7E%XZ@5Cf z&N}!sBEbReZF2K@uzUP7eX+hzVlzPhL;mze5rLOT9)^v11U%M__+(;M9#sv3-|6IZ@ zI8p~z>X1IR15955RAv%YDmYlQlpE>h@6IC!s zJMBLk8RHqT_qS0GO#k0uw0%10b7T+xj`N=8Qx!pc$*FW-WR^{+P`oDwoG1c*=oN+5 z^Y_HlnH)47{A7KXAIZ7Y1}*u$hGZCz*a&ssdGOxGUIx$p!LcR1|33S1Lilz$V zs)LE($65*5Xh{+$h(b7_Fn@V_$oBTdX&MxkDL>{SDP|V zRa6dA1yryyxRx;(Y)G+4~K~>Ect6=y(Th=j6Qp*yV19XC5#vaAOqh`cl z=8>&goCFy6FJFNsw7;P9ML|L0zazVRL6lR$<)B}qGW(CV%9C@6u-B1Jl~#&CezuID z!9Lx8yf5CQ!9{9Q7R2;xhVk4A0#>p=XqWur@8pgr`8yfiswQh2-g=)?q?UW%$stE2 zT)34D@wRquKenBnq=bh!PQOeLe$g!4}cn@!=w>l_WiLS7gd@o3I=USOoAD8MD9 zKLR(+LK7{Ao`g}HE3C(8>>%;U3Dx6-CLxA3V)Tck{;_h)`iII1bg}C3hrBD|xlxaJ+D{5xsB~)`%7M zO$sDM=0oCl?B(esZT0KA|DC?iD-kO#`mD88tHW+m!_KENSck?gPzE255>ZcKklglM zJ;l|PVcSe))#)h<&#A4L%cA)6B8^Qp|JkprZ=ISUwe-;sb2(QyaqXI3nSI;3-5_5NAcZ!ww(RWbH|2ar6;ZDdT;bWBD< ze1gr@+DEHWqc+0AHx-d$uNi50nD?yqK0}8(FA9#9rB(HIh{rv7x|LL_mw#fXm$@u<#-L82wQXL)~xczIUoR&^T5fOJy zZivTHp^7Ckp~iZo%uHq~nfewLa!1f4y+Dq!iqWf9GR%AM#O`%`#drTOIWr{?DBaME zfBIuT{tt+i4j<4m?m{H)4pW$Y#^+S z+OHL*$k$^XPdou67Zs0>dVLP4uIAWUG|3@^mkhC^>peHtvY@4R3EFkR4`5PuHNwl{ zFOq2sP{KtII8G}88o!7tv+hjuS7L@}TqU|}NDY!J#Uyf!>|dz-K2IE$yi4I~@B@AX z&2Si(>CRSpJ~6gsK+2t&%ZIwXGa;_XqGuoYN8b06_FEI5at+M>43piQidD$Hh1%Bf ze3C>Y(Gxz<^VGVIv$>ktgPZGHq5o`*$6)OG1dxTtp?SlJ&yr{|tf?)H@t6vk@}doo z3L)dpSs}ZHC?nFeFcJ9ajr8rd_tsOEQVjwVw_ zu|#p!dV$)?H-U%lokhD+=GLIj&eE68_AB^1WICUdaM2lpB;bO?v7Gr57njeK>My)^Fy5RDQ$2yW)5>DS0NC-}?62POwb(gdL^9NEAwBEtX99s5YYEBjiB{ zIj^}78qJFq+|8tq7*8m=OJgMaHlZW?8Np&JOpBTD(|?cbD0&j=r@x-dpl!xDNaZ8v z$(Zoc*$zZq#o3?qE3IZv?MeAy*Qs&3x@vnHQgLVKup+R>n~$Dj@%ysd|FHj1e)Ppb zI`;U~j9r)p%}ml;)-bwFIQJU(veZ!JQnhEKm|s< zSp!m)#q))RQtiBGJKeZGs4GNP|sS%~KIhc53gkq70roVw6YR4>x+QOk`SY|F6)Om{M{;Q_DVv3(E;prsg_-m7(}g zBshGW!wND*5sNk+wW2Z<(EXr*VwgOdy1Ek4x~x~?cDM2HAA$haMdI;du4(?bNd9?9 zw59EJuflW0f$%T zB=9M^C5kg}UPbW=(`4D%BndqX!LXibO75qi%2OqBm#Xo|Pn8hbSoCJ=%Z&Z4jSw$Q zy;oc=bn-W*UP>Uy$6 zG_dM5gd|<0>Lsg6c9867#yv@IfUXzNk76VRS9Mf3Y68Y(+7VaekP_{qKu*F}aiq2A z)8*>2NW4c|C*JJj394d(2+W)x>dc#OkbG;e3n`u|0eh0EEEtr;br|dZ&(MT2AVITo zH&(N51J}rE@5!hQOOZRflFz99oA8WttRcg3n5oUgd#I!C?xcGLwf~9e6sJ>i=~bs( zC_x$?nGuS5!fVoNO6I@B6CE>H*=U>QI+%>X!{Q;;{qZZ4od4Bs=#n5QgFwvVH)J4O zVMQ8M#G_;{u@`)| z=UI&xbe3)yKpTV*C-?{TM%RD-mubwC+)g%%#1<5RPS7vLwxW5-*1q&j*HbklSwl8& zZ!A1r2s6XjJa>lHMlvy(bKWR*oPHZ52?IAmkT-m4>95WPY>bz{Wp2sy#>;r7>80`vREH*gk-T>p+{iA)A-&Y zVDWYFA}wnke_Tb(fc45Y1umGOqsj&rC!90jvDcL2bW$2@h?w7ygWVn2V#gn8VZ?;jLN55K-WGD+p2 zX_l@Fe#->UM)WU@3%##-Uj6^^pT7qb@R|eQsMRBIdPvak1Y-{IisvqytV12ZXzmF7 zHd_fwm!5j^;#V5BKGy)Us(vwzH8gKaz6mZ~PjC1N$^7@t`M*Co!ymeT2pqj*zzgRL z>~S?>pw&*>f$TeoytY@uI6yf8n49(jlDGMQ{XaQ_o^-J5OSivH``^O+*Vq7!-N}fx zMxjb1&_d(_g_NNP&KJl0rqh%9ub>-E{NwgjWsqBb4QA*3UItJ8ItHI;a@hq95V#zl z#yc?Dbd#*BBp-;gk~%o@!dVb(??lfh5Uqwwl$1Ove%ym4@_qV2l$N0MmtUC97B&zv zXQ@FJdx^`!uwDOBXQpacrG4XSfBu-${6i>D22HH;E<~J2eS1{L)!csosjw9m%%ZIh zV8AN=IT7^O2On+feh0SL`1Pv%_9?ff;A2Qgq<509_lAssTmlo3NETXvujCnL2^Xc0Hb8XxIdOVNptSdZJ-|YZLExfthO@ 
zYDR@ZE>PcYzT*7`soAhN1*}jUidBh1)(G4z-+)e*@$&k$yFlT1Ao?Y|^@-F8h}Efl z3g=*$9o$c$oe(Lrtbf}@qr5aQ&4c=)FBF4n+Xe`;3yLQEDdbPCU`TvwfB7$%Pg^Y( zf&`8c7fLz&hT&1OFax2WJD^k|c@4W{^z&6i9LkKLdAULZ3lm3X?7TOe!^G2~%|9vd z=M~<7YOa99@5U0H0jsYzO7mibNmJzK(9s=Nsr}pmXZ1r@&@2misQT6H40_sL${Y^i z7Z2^uuzV|pFvuVnEcRMZ?yNa2cnW%l)IjBAYD^1H%JxfCLDMxfQG(ACbrWhApo z5}Ia(@=b)6oNg@eU6tNwB=kPeKT6bAagccJ`kIgMTo%+lPrq-WZ#80`j>I@E2Ts#$ z`8;&Yk~b;Lk{ko0-d<%<2fyH+zKfyO5vT2LH0debiC;iVE%H$?BYzC_+AqMJh^~JvB#qL33p2nF`^ne z3jocjyK=X*#i=7HiXNWaQ3~`c@eXOKj?>j<+&%W--eK6A?N`^GK#0NTw|#2GhKO5^ zw5r;|)5lM^NODl2yFv}p+)ZQ2oz1s{!aliPQ;}PIWCA^}lMTs!DcjA5-noehY66}} zAfU=VV8s<1vW8J<2-HF)dYAfCrb9_523`p1Tz2?`kQXXb_y`pp0=S6PIfC(f;qPD- zdjRp)C+pl@+TIU-K0t5}OXV_kBoTwc&XA1p7g-P7yl#M-qIN&J-K{bgXuq*vfz{@? zeOjJeBXg|L$XV8)L^J(go$J2_7x0tw8AKiD%8az07hH7Bg!%{MfN#?{%CyYh!FL&Q ze*ZN(f*{W8&hJbCGiNtJt47V=II0uU3A?@wGLA!#ho-~QJMHH1D;+2sk}e;1mC-q8 zP%}FS_#%#MVvEdJ&X!r5onvv_+DHKN$v<`KW=HL-z=X~$lXvS!#`FJe)Bk&IC>7!e z^PA6XnRbAbQjb;!GDusrD6y+K=1ZY>k1*6L_F-y%f9}gn$aXkXNeY#G-ieG8>RrD5 z!)woTwW`w`zzi^#YTd@k#MnngX~C!BU|4+pj5$Xxh_k(eU|O#obK2YJ5nh=c;p8GU zzL3vfqRJUqKmM3}DZ4dhMfQK41pj$$s9k#0^LEc=@ceiK;locrYhHcTc^1btCeD?DvpTo>>EYSDzFF7qlbG=l*k8{pZN~pMz@|<+QnH zOZXWz9jC`o*HQoiNR$uif$le%<*MX{iLGL^C4f1&t6MOlmaNfA}7cW3WSja}+(IO{SkA zE_8|rSLxK82**npSq#}-MV98>vC>m6HJNCbWxOaJ&|Oi(MzBc6F!FP++0PFz0_aG&+2-fS&yr@g$-z?rp;%e$qfCqK`N|0K$Z zuB_yVszX5Ip)%M14T=0mpb181EIz1j2LgHsoc=#IA6Vb zt^=F|ugkTL@XF5>9Bn&ft|t-{uCuMY=~6e;^pfftG$*dBSkv_|`Gfn)cz)4-KGwgq zNW?>kx05cq?qC618)MkeK+qWHmcU2fu{z*VlOWk_?QPQAXP6~i2EWqxXr4EsVTk{- zUf$_e-0ji*O_#;2#bm!O@^z3(bRlyN0V~D{5a^|lH{LS>g3f%Nr|;_MV3Fd&4S@-k z%9b&R4jyIs=y&n)Ok|=bFnu0cU0le}E%<9!HpoTAP}M51Idyh{*1ZD)4){IC$PHAs z1F~nGpk(3AAGF7*uov4}T$U6AW45N<&HU6o*A^1OEb8Q$lX>r!vZN2V*g1_(%jYHz zK`she7ntHax6dxq{%?>45LSLT9{^(MYR>!~k!LTctasB3x+6EbsRHhG!i`sHFKx3#jx%EkuUx3W=Yn%_xaF@urSrWVj zIu>LWoydPcKIJc<8TD=ku}hkG<447FE96$g4$=ai9d~WPQNoJTt*1-NtXjG?+o$Sk zvDyPL#Ji)JSr%`>i9ZI3l_Qq0PH^hTtB&^sdBVLQQmP@4)qnr>^=)|vHOSHS2f{D) zyK#U%X@>7E&kEi)>wZk8Jt{~GnOy~^U48jH>Ky`jq7fO@ z$R@OcLxf*rj)Sd*nglp=3koaJ`VNACFX%UM8FG#IP$=yX`v*6d8#xKtf{Z)`VCC&*M`#c*k!Vpy6xqNY?3Vvk8G*6fHCz1d)2I;TBUgd&($K-~IZb6!ppY~4{ed^K# zZ7Qtta)FW}FqwQTShO;vu2amFvogTF}6h z@zy^vnYO0`R_Yc((hDKal|sB7Z{@e6c%?69JM$&|%c}IwN-U`ojCg!mWpk z2VNb_PX3d8Kgfq=_I`uP96K4uz73~yzO6%sYg2gXR9dvD&ZuYq^rfVgi&EnK5=?DL z9~Q?S7kzzV$k;T(AoE##a>M#SWbYvmWeX_#qwuJ4(C8SZ3jP6BMR=PpOn$qUJCgpa z02Pz4l7vYnUoKemvz>lV?qrpo;<^J(t!^vxXQr$qWZc?#96!;tuT`2)kropcxC<#^ zO%W<18wGkF+mxh|DT@pT z3uDbS@oy6`X{YwwrJt1hSHKxJ;76Ke;C!SX>7L6ls{=3Rwgf%tPFE1>z+dnquu$&@ zW%~d2LXLa~_s!!Rl*PDCPnLfWT~@7)#&^BEODct&FEDpn!&E%kT(;PL2J2_iuIU!G z%|}ng7#=+TVjt#j^Vor4FkwGJ-r1V8ZxKC@bz)l2Pg_?YFS=GBI(D#Q37~f$+bi#G z{~G+^=Os2^QU^$E#|5q>NYWAE5!wE3R#41R)TxrSUl`~NaNchqz;ZQ`EbYf?GJZXnd7uxmwFvY)5C3_6+V2~$$$JD%Mu5}>d)z7&7I&e3{`rgG1|7t zSyC)~_u!lyDSGg@iC#Ppu_S^f_oMiW3OtILSEEZ&YxXjd-cC~s6E_NJ~$P?f+ckNP&5pnpJ_G370>>ir0xV@w$ z_8B`(p`Bevygo~o#QU`vQd&YV2nThP5C;wIrxb(Fe74HjdeuwVj!&+`I)jNv<4468 zXRft?8MZ+fv;O*yvm{4c#8Y%F{Mj>BUu)aJX#oVKOicHo({h$;I%b1MZO^nC<7s*N z`(DVF0)FlyD0XZ*P2=Zq-+=S>bBmAY&65Z(edM6LR^|L9)!6I3j(;Ivy^hda)5%T+ zn$O!kmeZI-@Z*E-TYNt{!5y(lSo|PHtH|DL*EK#+c|K<^PW*#Vhi@j6e^j$bW4zey zx7&*bwoT-P79%M_Z-w2<$DCbUhE{T&{!F)E<=go=0fNaf-j&;Iddy0GIV2LJOOjf1 z`DRk-F{s-1y=Ia49uODoCceak%>_hhr5WYT>hc64>09U@A1>;VO_ar_=rP7+e(CT> z@WTq{0L=JOd_xrG{;AFhJ;UQer^1oJA7}Drb;9kD^gC%C31n@?n|iErgC`XFTg0^E zOlzIVw1sUc*-hHxDpSta{4=BKQn;4()(|O=C7xIO_!#)-e%K<#-WmYerM=Z>qjfDm z_EguYPk-h{Wo@jcm$pcAz^sPxA5xR=#}w+S>~)b>fQc2K7_U2J5arsf*{u@e>EO!^ zh5LPPn(q^xQ~ATwauMjJQ|0&XQ+l#*fQ<6Xga`bzQvosg**X1+qg=%`hr*j|mk9-V#(DWc+mta(^o<16! 
zA;4UIZEX@taO~x7{YgG z@+B|Jz`@7p3pYMG%QWIqELWcWs5FXKy5sTQ8wBO{<7E-7o5XQUekSeDWN+RXEFOOC zUg^`)Dz2tQEB=nB?b(HIDk`jcv{Vdb8T6#z*!m`bS8(#=n(Qxc(erXNMviB~9nLjA zA5F+?c7&l%_-a1Kz?eM-IVrz!WU0?H8Vc>>$(gLR5j6P|xmi<{5zF8XE3p@w#Oix? zQp5>FuRU(y@2Q+Zj4P9(4>;O+(q6Tjj2lL!@o14JXVG#kWGng3lB+7uvG!pP@gHbK z%dKrfZ}0>dQCB`~ta{07Nuu)NF&?FQbp&nfMeW=5pnKLExj6Ts`C-C>S9;E^mqhmn zq5KG!)R0C)gr+4t%e8RJG}AuJHEJK7WMhm!McXFt;pvuRT*+O9Nlx3O%(%qLO6%nG zlkP7#d<j%^R%d}Hl=ri8P%zJ2u z=)(kc@^20)baNwQD$|J>Hgz6zb5R)UPevcNa&9Qi3z~7cmt9ws*uj034KPIDz5s*! zJJ@_Kme#?@FQHk9zIC!hHXWWh99Z|QRdL@aDA}Y3uNs;mIfANojo30)%yM+jYm2-xpw)XN!1`KU_SDZl*)w%+Wyn6Pu6je+z)Kx{eRiMI1<)b%~$owy5v zE?!fqMT>sb%^xFn#Hs5F>tLy7cK4BcLL;gpa{aoq+P0gB5nTmZ zhq(##!!+9RXZ8UCY9nL^Q5O%U$3w5ozR|Uq^-O5alZ)9$X9*bQeJEMom^Gi(&Pm&x zwn$HD*qEU3ZM!dWV>*9WYLDyGj<{bj^$ejWu7M%?^aU@Wh^g*QmD01{{6Rw_2p6fCUnkJbTK+3_ z^4X(?hyCIt6Yt7QW8(Ra8fP1A&a^-XwG`q=bdWPl^_PV9wDE*`|7zFqeiIOzYxYFz{{$0r8r z-zf)d9$jKL3-ot7b81MD?o+;hF8TnuehzDZ@3Y6=^z%w6E5!)QkW#>|raZ}(Q2aT% zAL@xPQ!%%vS-~27zF?N%kAWF%h9#cwC&ifFr|u_$Eo51E$Iu$g%@NeGu*_(kn_A*} zJTEJ^Wvk_O>=@=`OdYb!GIL~_QRy}W!5kdESu8j5UDw0*G@Irgf<5d@3|0u{j0_0^ zJ*4A1onWF-+wCO1t=X(>nb*DS&-FkAf`k(AJnB72 zEiW#VHl)Mfk>JvOw!zxBR#Ydw>EA;8D0)RsDw)G21SoF7lRQ_Glk5#&Dk27%!}9Jo zXB}SVKbsm%xD5n6_Cb5tTPE~N%bbL8g* z#_!LckxHlqYnDZWi;GjESeq`~a8?!7sF&vxu$xS~W~LQw?|b>E%izX6U42RE8l z7C(GZ1!s6$!$M%1zDT~*Sgo#5i1(Bs>7mgL9j3HJ$r-Yw>pK*#TV-JnUSc9v zVlXKJ2YS0k3%SLg1J=0DMo9BmfEUKr(-~o2SJ-7(trSh%e`EiO&T<50n*9M+(J+CP?_o1T~; zt<@jCl07`FriYkrF6BO$35X74EFA0J_ww-b5_B)Wv_?GcQrHsXOR8M>Vq2n%VdP@C z+HWJKnJ7FLA#PFU1TB(+=C~Dn8jf%otGZz`=*s}S4Vr#p%=91JKm)@hkA3*|#%scV4;bcPMfGlDZUSUlp*yRcqh;@I%l# zHQ$Nnz@fU(zq`Cf%kMDXlB~`Nsy!UM3wt2gdfdC?*>Z-l)a{jFEKxu6;WVU={YiEr z8d2#Nsy!n>lifTQ{MT<1=EiTzLn2ndKt^(e-|%$sG}O&*p;xo@uY9&F>m-WKFa%R9 zB2tesS@=sPEgNnx0H(aR{7dFJ-ZtX0T$7A~9V*MMSz9o_1wpC}%Px)dOf#@}gpJ4$ zS}*NHiDx|5AWzcZCv{gsQ}-|*@t0G<>F(;pjp^*Fj7Xb|ADh;&^cCzn!7({IDX zdrXhG4k$*fGf|n5{F-zr zpC;`ZMbGxTeWTOF6RrS<1=E+5^VfaL*K4AQj!iJLcIg`G9HfbZDIw+pmkdNDm%q>Q8SO((pXrX9V%fbMUb9z~s?v&OT*|WW>J-TdO)>x&3c22^wRg z!YIgIwcDpKf1gPVzhG2`vwd?-q<6u-bEUqICzL7Hx+k{GIZZuIZr0}kYgxSU&+$^B zCvC-4BRsg~!Kf$?vBWCM5tgPIO_6bn{t{yr{?CXrm5BgF*ri9(j&KgBoc)4((v9wy zp6kH=ft+xhp3M5%IfyV>fzS{+0TjhD+cPh1VOQwX{?Cu(Gh~(CT6%5$Dkt|h&&1v) zk6h&sRVJRWl>FTJggiKXY%eT9I!^fn6?zN;zZ1SBtiv+`e7KsrG3zpHi2bg9@`Bs0 zm7Pcq@P%ZO5&ST39F9IU+6YtAy36s|3S8W`Z&g(7txQw=HiaY(g3Jx{#7a8tzu7J< z;g$YRI$DeC8O8KU<^>CoUZ|GPrOosEc|L+o6d9Z})|~Ol8?J9A7MC6SJ#3Z?X}=k% z0>xC_7^^bxZ>rv$aEpHc^{^@2zWVJ$dw0cO6k1p{pD8j7A-D@Dp!ZkIIG!ssIi+#? 
znc<`V`6T}*ct9V^Px|K9hz@z23(j>_Dm+g)a#bMog4oF&pveEun}t{vjf+NJ2FG9~ z21Rn6fUxZSn`l$M?qaj;1Bj{#I3wwK+la*prN6}7p#oGDnYrT-RDHIm%9AV#1qpN!MR>u~YVOC!= zQT;l}B!2sCU*}03w-9Jtngw7%lRTAx>bM#v|3}=`+rGe3_I|+)4C3T}yIxDb|BLDG zA8k*fy{plS&B>8q>j2UCHMO`{nR|X2SryqvjeO~QK0KkUDGR+>fzO^psyV>0lwR9S z|F=*WdU;_;P;hw*Na%TB&I++3I-b5+RV@MBf(@GQU<|pZ{^#56dt*Q$b^~CL$Z=N~ z+z-^<|1GS|BP-`D7w%BR)$G4*_s(=R(y?<=I|53NFh&jfSS zS?v=!!Dag1+W~4XpRD!;;*9tZVmKDTh@+?nZ*Do1XB1x}_d;;iK=DkPU!WwI*Go=b zTrdM9W4UNav=Po;h*OYTKPZn=@dx-i4=Bte2kRw$8%!V8N{1?j&^IO#o=H_@&x;El zwdU)3{m1eh-3-`w_P|Z>`rdqalipwRf{-u?t{NaLHjGOa!C_9c!9c|QNm?Tr-E~@p zX(DAHF;6^1h?)Jog36QGR33Ye|;fTQa?sL>R z_T@$rvqU3xmMNX zUKaFc7mU9zN&!%zkOs+w>x7+;y{910>1Te5Tlz7mD0p+~2~a(^Tdn||L$MRMl*De?ndam$|Q|+Gp1$f}J;bV~nEmeR2A|RulXu>BQKj*ukFG1}0>R_ZuAGNXv zuq3~`bWRcfJp%;G25#*`A!e6)0zm1!Z<@XB@!laVIS$SUUVjzDVuL2Iz_`vemquNG zF5MEW`5}H&zOc-NvuGDOy5v#5c=ET7O$P6((;u$Ei)AhelH2HfTY7)%RHTZ6jHLF> zz9*}qcj>)8ZyI>;LX%BfRFqKMcM>gg3l4xRIhRL_ z2I1-Y=6wLrTP8UHV(yAvb|v@d#Sx3SKi>x48Y+)c7eLv69BL5hoTKW1mCUrTOBk$n zFnq8tRtUa%{JNomamIUz3bPcPp1d8;d9Qh6GEyn*>4{LErd-teOrP|l8{Qbt3(4mqYUMQEpbH+M{*yr3ipPCau?QXHb z`v+hR9Napr4SBz^ZwG)22FOs=R?-eSgW(>_yfE;$=h}8;h-yUCuzVv>w0xC8rc=E&~w1&*`E#p;y))eCjPUKBTxw}7u^698luA#F=tZLV`)pGqDS!pP z&G_E4vFIqjluyDrbPXU^&qm>OPK^fW&BlTGzjFA z{7>}894#rK83p>a{6Z08Z}CLI@XNHd{$@!c-DDjuWXwVIBS41 z-su2c<Ub1jR@b7;%O?CmeDfo%rt*2b8PaKxQoMdu`kt%icd$WU zzx#JjEP~!1m;qpO63(g9I?GA)To+}hyTZYO_`!T$nn~-t-CK#Y-i(l)XNfHWp0c&n zvdnZvAUU??s;~oJIPGP;gR$9Hz6(;x{X?_i|KPr*3yAV6Hj$$$)sy8wg^JZZoZsexDT=A!Q@kEA4r$ayrf67I4s1LfG{0j+uHx zm+jv80zkvTR%6*R;}2?`s^lO*k)t-0hP}iUL(Uv120V2Kv#I?ZfUc1>H@OY>Jm%*Vi+usql$?zxO2^{(B;eV{FVd-ROGmxR&^v! zx2rLMJOU;zX_EObZdDXQV7ElIL+2ueKGhLp50js*7)&@0ApqBRsepoa+ zfLKXkM--A*aCJK0<96&SLrd9zj`bNJj)`C!A$ z{*8qi)#_-I6A{z$H_{3Su5Tdt#qpsOg`+i=Ukq#MNOu29aeEO5sV$x;vs!Y%F305q zd4vPWI`#aE&dLBordr4Ff&8_6=(reG%^#{~2Hcj${dQ%1Rp0F2kiSBMErvPTxvNxI z?MG!PO|S(V0n&&X8w>1SiM!>s7O;T_(<%-36Zf|UI7|Q)(&POCV*Sp>#82g(ODa9R zamEZYq4hv$U9Kyd)Z&&NHe%w6s^+%syqyu{e#}==?ikXwX(a%$)$% z=O|MIPkar@nMXMdPkT6jpRdqDoav@@;EkxATahZfZ)=V|veoY`W>#+pNl;%P4$3{r z@Q?qOstL?R9arN|U9Nd%Y)xZnKh!^w?676)K9L29kGBBm7Yuol&wSaltXTh{zqW^_`rad~TRo*#i{8z22Nb6;YoN+;a{I`=c| z2Bf?@oUrp`k-PEe{ppjr2YiqoK|OpITKTZg@u9v{opWlTc4%0Vb|3H=TrYV=OGm9= zbb;ATse6mrkOgmeG?b!VIO*meh1NT0wHFQ-ai=pB9s8EPBy@?sEei%(N zrI1K-lddSum?By(yVkn9!==tF6uu^&t@5PNrAD8zF+b0hTWTPYw~qr;KP1kB(x%Y5-wZ9LdzH5VspC7b}`OkX)M0wu4m`0P4 zlvin|FH_32t2FfDAavDeGhveng%Vfm{zlj@Rd%|k2{dM-1q8tNG*oi#DJ38TDyTI# zRpZoE{UsIfnsEDtqF43d_6DZoS&P=i%opfyLjwHf{i(3u7hd`O3bb}$ zB}t1OzA!5+F5)?fLix8(i8Xm$#{2;#F20~}LV=SGQm57e_d#M{!`q8(r}Yi|sEXI4 zO{tcXqIGk@gujnxO=jRJvNi{I^vLQ`;|^p{&m+qwnq_XeFY#4|Prlne#aw-W1nk-1X;b!`%xNfqzl&<&a$1|`W(PC-4;>JrPM0X)J&9x+0dk8 z0+G;r!>z^eE8#7|BLavaNo{Lu>k!CD*cG2a-)k!)?D={E5JEl1K53D7$ny<3UZoMK z;4zgZ^YIfP~uNY%W_KmgU?b2vrXiSm=_07i=nEi*)0An@RwR}^SPTA`TiZ$j5b>qZ$V1N#{ zh4|xl{pPz;*4`35%4`_?k#ep-1bNpCp`X{bsmt!04)NSiFpobyQ2`gPqIV4F>D0Z3 zaqeKj5a4vLzxO=Kw42`0LYb?Kr}78z@QD{oU%8cy8aJZ2dh?JXUBYXDE&$8WD&%h3 z(3~J?+(cS+VK8HgPGt<2erTWk2UyXWG+n`=a8e0qk0Ltp3lsTS3e>SPZB*PWz1Fj- z90#Rh3z&UUKF->hhDx;#Mk$1GLv|&f1R8avkgM^k{A%B)fajb7q`#;v{AVH|V9-GT z;bnBI=M4+6d-U-(YjMp*#4rdxWWA)!&_R`Ri)EoslFa#9_B1=aap7eElKHZq@HX0u zDJ>lwZmQk@;dZ#=Wn_27ER=^v3D67GqFB#%w`CKbE% zf$?WgRy5TV7!$b4`6?4_->WU!D^kj&Qj5>cbu7MqewxPFBNKe&&!}GI$Wuh^W@te7 z)QDQP%BQ_ktcbZ%25nE5;6NKzJv1O=J>Twx3}lu~HPN~?I^7!Tn)JcGIgY5=I@J@% zBe#Jc>FBUjjqakpXrq;iEz9YnvPb!D$y;D<@t%k$OfEsx*o-0UDpzw&=v$nVK-k;4 za4PYQ-9sOXe?pjJB4peENDB+{^#)J#kl@#qnZDaFmJy zg764NoweX>Ce1Q!t#*@DLy3>ur=#_Z{kmokE3Mj`8 
zW#^T@m7hI+@h368II_N~JzTiD<`$=0S%ia=ba2ZRvl-kYg~WeZ0B6aURM!JcxNW;- zk`#UwV|!X9@7dYqkai_={N!KexZ=9TlM+_@vz^a^!R#l!o_Bx+7Ae5$hx3c5J0MFP zX>mk64Y)$}n{T&b*LLFL-Oyz9rP8$6&4jXx;(1hDm`GeQoh%_~N2Qdazle*DOhA^z zjdx~x_3f8iRfdS$ZdDqL0|Gu_B@>l&l9^TYc(VrseBWP z4W5GE8{T|g*E*BZyiTf=iOx=Y`cvWux(`H43hvEz6jHHV_I<;6rWp;DVp0a=oMM%Ss01cV#V$v#x~~98O2NFlaKA_C`jz0QT!A;A1N;XNLJB|l$R7( z5)_*^;^ZO?xUqa&nIW~muKn7#)|XPfOyO+v`Z2ZIb<88j9(^yY>bq2rYtQJE7X4vu z`m8V)KlJR+)Wt(zzSmNHJ3brClwF?R0bQndXIuK)=0xqnDd1fp?aQ%ml{zQJLquzC z8LveneQD=#+E-zZ70OO@tyA0D(Edz?FFI@p@r=C2_9F6q!=yPr&ksRBc&Bpr90OIG zii4}!3@NEUfk>yCr+ufR z*S!R)n_5iLFo=j}~dQSts18Q<2yd03yBRr8)U^_E`L+k+NU|M@-e*&i|z+iAT~ z-+p!YqO?09^nGZR4w_vqzv}t!aR;kJ{oU0PskeM*!v3z3GG0lM@Ndh^cG$r|+HRsY zYnexqIIe3Mk7SrVlY?eAaw4vi#@e+0N!z28xKJobt@WKOfJ$nZ?gh2LGpD$wHmWx) zsw#m@2GmZ4^^H#FH-j}Slq&m*ArNqedv1e^e=bjR$`fj*&6S3uD5AHvs)%k>AkgE^ zw;5VWpCs3IS!2cHm1wTO)@6qgw`b zIW2MSQE%K|-gIVu{SrA}--o0byBfAoWT!B^uZ!E8}hJM;RlpQSgK z_RJGbG_1eT4LtkW_{r%QyK(05(y=QI<8%mBG)1j}&WCgGXC@<^D^3aO9qd8zQq8t` zUY|)Gd!0QnOY?tmXg~7n-6SjgHth-ha%fAUeXZxSeqCoC_k_TUZ(pw8mHtLJltp}@ zYFVDHR{Hr3vlgjWdfON`4VY-}^^Mb3VX1UrgBvNJ{nf_KU!Z^*_BcvI&N#d z+@zdw?PI>&#dg@H6THt{gXHu3e{6kqT$Ekgt%QJrgeW06G^l`-AV@bzixL7u3kV|u z(ygQn-JKHBEuD^pbPSE8bPU~Rk3P?Nzwey)&*8`1v+tePUe~(Ta+&B-Z-@(a?08xC z&LFQb3IhkxKJ7T$KwOMx8x*sQy{00vcYXW)lcrKt?MRzRiHFp~pL|vAs1LA2m((Ad zh;Hl5rT!EqUY#JaMV`y(wLn71p67={wef*iTPaiwUBW@c@TSetfD!)L1FCYlL)krD`Fp_PX7-b0CZh4y<4Or50jT;2m zGb2chU*cU`EU@MIZ1v^$!Gb3aYX)gxH$ILC0W}UeR)CGkWQLOxD$=6+i#HKv&_=Sz zuZ#Af(dSGHcW=}HfzLayoW5}@Qd-aa_?*W3YYRje-*e59_WI{;bcY%9x8~hj?`p>C zc2GlbUgXdHRK1hg%L^CKF~7C=-VXC=GR%7%L?A(GMXGS~tirf5Ik|QWtB$Z#a;~;S z8|^^sV?k_RN7bBg1qi|Fr=JJVgF_ur=A}=oYgm&X!9}lgK5r^?DWF%J77tE{;m)1e z@y(cesEN{eKS%;=$LYzI){_P8Ac6{3NzsjaHVa`vr&==w9ikWc*SU>JUXWn~SZzYC zD9lei5Z_|IKYvC`5^S~@UR=Dq%26y;v`eNo#WzEboByNm9Ol<(yLQ#^mEcHCMo^ym z`5Q&f5PBb0?Vk)|{Dy1;j`XDAnO&lTQhvK}vU$7XA%s3v$ux4PImRVUTsr@h>CSD0 z>cUrB7s?-Nx)={LtjIRpaug>%m1^kW3H-VTYjs@n-c@wGQM$!c6YIj&XikcN`UJV= z^PE1+caB&T+#_5pcs4{f-PFYzk7B{Y_I~K?^&9F$&JJSvK}H_1rK7TgVd! 
zKDkyr$=I4_e9|W2pr=et_Kgncfdq=bKm>YP}8gd&Ql~c%5}||W|xz@nE+7x zDWq{Askan`K8M8<+z*^HO-y$zJ!*&RI4?y9{l0A+NCN9_e1DC1*MzH?3qq-lXYh%( zEvSfq0l~Q-_YKQAOPh;(U086|c_2W(BhJkHnNyHWi0a|}PoaRXLqPatZmFHZ%&Scj z-ZX;SA&DbYd_V3Pp7a>bBCnfO{?c^U%BN@{0&zb&qX`ogXV#j}iebcc9awP;A--Z> zM9u`#>4c9CQTgP~lwX){*qb7)FA@xXlHRE%9by0db)Eam6JWLy} zT=hZdG|s&5XO7R`_TMFyN<&k7jiXueF14r!*YvtNiXfa@$YkB1Us6#M()3(|urI&A ze^!)g=N??~pJGtgw~h@cRkPCV{Fd#Rlk=VrLA7XGeZ*FSXU0wG^?~p3xcAuGB*qDs zD#ywGo-zB(c#F}-lNlspf3D7aM&^wXogqkY5hhxNL^=(Mr!4r zlQB+{UKVM3*27mbM2fO#0=On zX@_@D?cC*0y>IAhXR7v$wJ+P{=9WA~kafnr?QLP^DY9+m8jZtJ#Hq8q6Sg$?=M0IX z<@|#88t29~sb4TfF#(|t2c^3tiT+3$7m1a}oCi63tqk|?V@A>S zs{73a1(+tC!NsCeAp~y6@+RE6loJF-I-JoMWFO4%YCX_mxu0q*J8WpMgs_I1i8xG) zu!LK+tQZgvBE;FGo^?;gFUr!>Hj`XCI9nO&t`)yBp!NXP`@llL8E&_x2x<|G8W^NPQ$sR8xm7+ydVO13TB?WdZ!OEaA)v41s z*7uv^zF6-m{FsA0ZCdXfzX(X)cOBD1Fr}Z4+0M^UZI6GRka_{aF!X*3 z@qQ@X|IyO-uEv8&e&=huwx;J}8G#k1ZWIPzG1x@gvhG1Wj0=vF4M&6pcB!0ONhwMw zVhNaMHET?&n7Eze=^>P@G{?=jpZ$rAqmJO~x55Y>l+HQL!!hAnS6XIv4 z=@LAh9l{TM%gSOLW>$caz!k!2x>z$BL>@YnI=AA2!)R@A6oHf<>T(=uZZXEQH092n ztg1O#P9$GQ!U!>m{yGp$?I0|9n~%WlXu(3NI$ z8@v;?_js@umz?KUeQER86>H2q`O1iFAj)19<`AHW?~A$__{pT)e7XO5YOq@JLfmDF zVK&X^k6LBV{9>}RD3+$iXf;3L#bBKuvhD)&FFG(6(%MO*a#+jyj#`Q37L}-IRl~ov zUfY$}+~`)w_7?3hU5ft7!uc`M`vKxHcO=`An9n_Hw%e!;TlQ(o7ktLCWi`-onKd${HymC7_ArBapcSm!<^FTotPGqp7B(j+^4iGeZl>h z{JBu`Ry7hL>qaVXC`A-uu$p3B6^5fvwn z$&KBa&oxECU+=nh4XGDQAPA|be@&TCWkGMT+ClM#fQ!H3E7u6SG5=K^QESA}AW$IG zhug?7V!b$_*d*ZGu7^7h(H1+O?_e6?4c(wz--mspELnwBM$`5FlFU5~Ciyy5QZ~Q# zJz#lmAqB;CDyV5jSqE2&yJ>82W`2Na^o_eV9FIT_Ioi(oCT=gyR^`B7NQ{`%hzZH` z=jBdsU>Fnh$@#0SwH+izQ@~jLZC2xqLmmdb=w`3hrMuO@aD&}oTsic8Io3zri{i_{ zZ~k&4X7_MOa`0X*#BqtG!rW|gGr4-Mcqwt6e%{M}+hk=W0FrM(xFNezO^TlG{I=u+ z{z)5*FY<-9DQcCkFc zyiXaA7z$^Ws&U2$)jE>O_}qpoDfMWca{NYQ0z9)r#pPZdT-~1J#dPz;m?d@o4G{?@ zN_D}eBf+0eWzX&BUI>zC1PoY--+?yVo~LS2@UJzO&vtFafvzb;#FAI=`|SAFx&G~f zkIjqX&6vw~HFXE5?T6J&>xcWOjNPpZp1gU@U(v>3+`O1~6vYUZO`vac6odJ#C-Cq- zkHhlspQ5;aT;uNNbfJ7QqK+N^quI8DJd_E+j78q^0Q(w;w#`_We}}eTDl7@b4RPiQ z{}FV3x10Y?5LxGB%pyaOs&eVSj z>k)FI`~erkD!0Y=6$^2Lc71^tnGrA;&3!*ME@}JG@LkYS=DMtN>J}ob&O4fGFufFZC`GVXL?52Am z6_u)j3D*UW9Q5~aisu;jZY+dx-6unwx0HCP=`F${i$mHmHWW(?b*ad4+s53(_YeoY zZX)^m90w0uplAIKJ{9JB4qL-?sh`BqYd5WlxW7s8$wbE2ZwmVwt;@5pkD0l4K?A5t7ct(5u~q z0CjBQ6|NwXnSDDBrxxmT0;%a(_dAMU|Gaoo#7@acsU)QHEcka-{ehq#m&%JAeVtpT zV_e71-4&F9az^q{KlV>VkooVZX4Ce)GPTD}N~t0ZPFp)MrtJf_{N$iM4<)XNdzr5& ze~>JAL_SDG0w?e{G@CE=y(X3*IEIa(d*d~KbLbqaA0(7{=G*+We93gWiLL2}p7b6v z^baB(u-E^LU$Y!Zza#BJyTa0SOM-H-)ik*aPbQTM=9xT^B>yiSW^55>j;`K>OvrEi zFax#1h`dwJVr7YNf)nCF1bN*9r>UD3Z5=Q9q;g9!=hzg9Z>x__1T3e2xED0Ls`I0; zGz`g}IaFSAR=ssK$MbTKk9cy28xisRpg0=)$4JYMO?IIp%BWQLnF-HEV? 
zY`g7Gc9>XedPQeaO|g?C-8*q3;trTqDM4VC|Yvmc$A@&Rfk$%A7>HPI`y5$$UT%VP*-B3^6dOrt1 zvoiWQSgkot7wXUACT>87VH9{PSJ}7VhH!S{U@ipz8#XRJ=W+SD?_xY5Jr)Z( zo|@$HxRPojkSe&`90h!+RUj^t0(Ya4^9?5-%kyy@);_7z9zx2Qk%cyG(K(MP+6%8<@(XHelM}(fEEUfCQ!8AEkC{A5H|He`~gGW zoMjMW1f}+8ue;^jfEw^{l8^G|{5rk!G_J;)*KamAS7)6TtkucnsZeC>&RwH9dMoGW zdw5huu{PB|1x`d2vDe61C<8yf90Os=avTzw)J}gy2!b~&;(nDis_=MdHZ9Z}Bn$^~ip-OTAL2AABBIz!AH-Gwce+FyiB`#-f`T0Bo&8$l9e~u(E zVhT%aVh%EYpy%I(@J>P)3kI{@(=n-ZMMxN;V^ei<48Fjcypu`+H@%Md+JJqtPTxR5 zAmZ6o)bqTzOptAm&TP}9B9m*;%5&7Z0(4%QCkX|YxTS$i$?DHY1Ox*+>DZ}WIr-47 z?bg#f+jp^;jYgkW!nWWnq?xr70*46%j|XUHR;7)D;6nwJc0yLQfl}?9*6gOY-?UVz z!#bL0wV5ZstQ{t_@UC32Hpocs9L5NpdFD|}zHX;-RvtOp;Bele_`CkgNinbsu~Y%m zZvXaHtFFpVsGz`4QJ=>NY{u&w?Yu`fF(Yqa?78Ff&G>rTg&8L2D`(p(kCv6p`s@n= zFUv0P1ZwXwvPSafD(+~rJtiNBAkV*dc@9hlW^?-)I`bdZ5sDuOJ{#q^2ngYC5o8Ab zB6$?$jU`!7g4)O`*2=4res;HZ)5px&jbo;mw1A|y!z1#eRnS{&6;9IpqfD9$bGmw8 z>o~lF*&7c>{JgtB4Vx^7~UeTtheYn+4$8#Zzzg<_kjr0_>*tC~wznXz})G z(9J$84!9`Zl|m)l>H5X0fATz#sc!VOWprEh8kt;HexD@Fe@;(_2)ceXLD_UW?!0wq z&LE=j%XCWTg5#0gpXY#i?&GB@Nd>D=6FB}2+}W+NiKyv+dvPxGR(K!pIBc72q&#P* zH@$_)!(j0EZBEi8xl_uLEJL2Hkv3%yRgER%`I(^GCi&A*XcUW!-`8_mZ1b!MF3xc5r zAK+vdho4kG_(NRG?M7Z{^3MOfsm4&?z4O7cpj4(DybvEDE!mcVM&uc-yTA@SYB71l zb-DHrUSc9ci@j-5G;PC{_$_z6pj^2esN*~ZezEG~w@*%yO(MIi2nstxhQ_0YZC*5c zK|uV>nC46?@GdW+I`G^7XvCiuol<+Vq&Q{N?DYyJasZ{{;OUSK^~HiPoO-DKQ7w`e zP^TPiPAP)o$^*v11-Q6l%WfsZ1N{~TAq-$%glLLiJu>)i-^mnfh0Zc@Klr^9R$8o( z&Hz-DTW&v}N9$k3yndUv{HoYxBv+&E2eexb%&Px%NE{q*C&CTn-R!(x(!6~9BLom} zi%zC~iy+TYu}y4?Y($!iY#bA}o=%leqY!p{4mvb1&0nG?W*6N~)I-hrJ5GuuCATdP z?rN`t-lwNcY5~lComQ&zB@R!E#{DlGyqJ3|e=nE+bU(KDu0)#O_%m=3NCcJ#B|pV~ ztV6>EU9iJe2yH|_uD@$bCX3BBrrf^-2dEL)lmlXPObHI1nQW*dGlwY&Je^8Qg ziHQm#$#hb`A{fn>>CFTp+h}|)ibXrqN=_WTKq<1IbC#p9N=5HupguaT55ln@Dee_ zU2)4aK8u|c5cD=5(;Cd93EB1=u#h?AK^fK4X?+X~#U*+|!b zr%36^NU_H&V4GD~nGno3k=y=xZS)WjKHv5NInL#;-fh@INJ2&)67tpz!T4dc`@_4( z^*gNydw7gv!@6D%)$B21G>|5_7r0z4XO5-#OdlY6Jv5g){zi;NC9OBVB*74=RkV*Q3Ss+U5fKz0K9K_LgXzaFu^*seV^=P%;CPOfDeJmJU*vzl(3VV7`Uy#rUw6xl18S2BdhqN z$J%4)97Ot7DM$6cqfa$NVWuVlQ(%{1+V|(?l+Zsci>+Kf5R3;dzOMhdY$=U%(d)pf2PSnPLmL@)AIHcwD& zmg+`rmC@lX1@9`f?Q`JxkG~nPri=02uOQ0u68SU(vsdOQyDze$=b7Qwu&*k!8A_s} z;xd$3NR#1zd2OZxdo$4Lw1TN@$rryt7`JQO&;NI`QKZX<`vt3WwN=>0rWJc$y(SF2 z6DC7CjeyEsxANSqwmO_^za;ESgG)}EflL3ZhGyxy{-^n|e8L^0p${06_Z1q}q~oK* zA@98txvNHk>Qk`X)i)*ICRaxfG*o)_#jOj4np6w_u7p;Qz+N?pZ9`#1kQB;i`JsTw z^erB+a&&gn9NitJbv*kOh@4jyt6am_i)*bFD&3oP^m46FCR6lGUwWFbGPEjqjw)1L zS^(B3@<fsKi~Jy8&dQZ;8;eL4-ay#+~@H1$JB{fIurd@$X68je%3`+;hITBnhjnAf9Vx8 zx18MJ#Ptf~?lAjl8Cj?Y?r`^eLOu2~KcoO@|(3y7Z_VX6NAD<|w^*E}V)mwB#26MM;WduEbwB2Q3c?xJG(-Rto%-g^u z8oEPe1(8+Rli_+E{YtoT;r{l6J?d{rK11q{4YpS0J$#Q95)ANG?1q;B^H(*6=OCxC zf;{!WXDxo9D#FgvxP_B|v1}-ASHLq%vv`gTzX{j%`aC(ZN&A`DRY4npTAl!S}1deh) z$k&H89)IT`llVjuDrNd>FjL3cZx+28mvv@OHR7|@syk%89H~4BV$j8^)MW;!{8YOp zAiU_oT(fUNj5Z&Z)|9!*PJH)Th?2s5`i1}q$z-2^+l6ushu#q&1WLT-8GS}t(CJrs zsUZx`)7l=O6C_9~-aLjR1ICWOKJqqHV(E~QOierW57FJ;SLJir028_g{QhWs3G?J= z4WN;m;f!Oi6L}uD`SPz0kDDEQLz~MD28nwy&VYO0^W^Fg2H|1+smi|lj#H%+g>Xb~ zyO->PHlof#D6vG5?4u7Xamj!;)pMO(S2X|^wyK_b`ck0fn_w|Fl($6829Vv!GYFt$ z5Vn_`%Yu>h1ZHEcx;idPtoqs$KWw|%pOVI3Uk9Aznw4~@P#>Jx&)G4VbpaG*s86Jt zkwfc$Cx5G7$p~`zswT1*GYAR02j_~jP)Yf*C3T2C6_$aH*5j;vhpRu487LFzwx2#B zp&@)IilI*P+CjCy_s5XAf}AlI=CL71R7FCQrGfbR9p^ z&|~Dyz4lx77uioA<$l|JN)#l4C*nd2i7KxX&XWzI1l;TwK;CYMqhZ^9oApA@k&?t1 z=omuxz|L15#yW%}#}NeWhmWa1h{cqBj&$26mIK)$@nHg(-N7JWc%C>)NRBxDyC+#H(Xt8bKsLDmswI0 z0bAg~l=E!i)k!iyt?w;ZDtCD)f||>Lg-_FCA4$>#ju72HiO%0DOxdhSf!Ti zq#(5(+&h9Ez?5mM)XEBlba0oT74$qcwd-Zqq~?r&#`Zg 
zYG%i8(7*Y|Sna9U>>I@)Gbd;yGZ`l0BPg{wV_Gh#3IDAo_@uAjYy+JpE#~If@?d7x zkk3&18_SCsyu@b5z;TN7LL)!R)R+XeGOP^KS@+`G#5e^ctKuupAOe#zMgvK*6Zb3& zxQsJ;@eINosq#??Klff5x^Z$+&;~TWLrFq!|T%D{oY~ptBO*m=u6HlC0mIh0at?I5P){n7&s&cDhVhq^r)D3BFG1B zDP$#HmTGbb`r)6iG|R2?E$XhP$YGpvCNy+!*KRF(e6SC$^a@-;Cw03nlKT;Xy%9>CUTeCm`_}JB@YvZ7ZucoD=UIf4=PY=^Woj4Z+Ht>t-&y z-(oarh6}4ty(&+46Fu6;()71_^LvZ$(aJRK23y}@t9Zq&NkeO|i%fRX>%u33iKQ-6 zno|+&W02jP!!SF*7BmjM5{xv9YFv{})Rcj>OT@d{GgFu&g?fdyZws@%I4(SkSNpx6 zjjMgimz7^yS*I6iN)V>+J}BurqFF&wu~RPi5z@6NfJ_}eWh`bcTa>OE>TC$!gc*C= zuJmoPeZ7rxL>e||%w`bzeiE6E8<@_vgdPp!cH|HqI8SdDE!*q&MgG=7 z6~y6Itj?VbdsN&Qb5fqlQL5S&y23kquBt+%#cAPMHPtPI9llY1>J@C0T?mN|)a|<^ zJQd{sev`p6UUWB5C2HM$Hrx0+?RGg0#)ZGfPovK^)88_-){~@D-^__oy|WA|*I$_8 z`g@V~llY}?_DyoB7oB5Ol*9gNM$q=Tv_;FnDK2a`+e9wjDlw~)1nZ`$k@c74r=*hp z7^=5EZb|SYQH%tr$y}54Q`>^T0(V2pp!ayB&rvTe0Ft-+bV-@-Y;9%Tv4JjlKO!#_v|KGW79|Sj12!JJ$VAZA6cJWNfRxhhsCmp319 zXv^iJq0=xTxFSNY*u@0f(zSlapvPcoVBD|LcCSmSp4LB#av+cW1B>ZPj}+eyW!Ni0 ztG%0c>-KH;-bB%N3^rZ4@f9F#;;zOE4kX8xQI2L|qdAA2&ty6&8X+oy0^$pqEg zSLHC-^N@X6A&k2(7Et5ooeQZX*`DrncmCRHa(ay`)O;YcC5L^0C~#wD0ziF5Fw+9l z7aA7cIX=CK2#4|+m-V|-`}ircS#C_4wZ#LEtHDk)sG`we)wWe6YdRXc zt|=V4rrR*lN|?X3NI92GV7CA_BNHF?^S2~X7kWjqg`(B1L{??#e}$B%=sjEMp4eCY zaavUCB}LTtPG?eG#uRzTHZxrH+xcC-ofg%0eK%{3CdYURhakzj#*<}wqQjKws;rPB znicf}(~|61Ga5jwa;&Oa$KS^Q4R|@0pICMI8fu&;4Y^1EgP-pAI_nC&2bo61;!)~lZl z915+c2)O6$zNud?F6>LP%Uwis+OWMZWq-}$5ctK%mf$`l=UyIE;9!pwk@5C!G5HGP zbtL9G`2z-XzQJ2TP%yy5_&(*H$)IXwW}tow4SwRi?;#;p&Xm#21q8L}%tg$DMTS0= zl=;ce!dN+yCj+JukaZpt-4{#u3*cRTil6+?Pf;x>p2fG6o$nZKe|>DC8OpvyrAU4{ z%02jrM391c-z!zB=o}_rv&b-Mro~Y1GIC3JvDjYu9S0T^V%xxY+BG|hQ+JSSFL_iY zMTAomd&bi^tduxo(nfhBH<+`8-5mn8#$f_(js&iWePYg#t(W=Q(VgEgm zGsStsL;!b}f+LbXX+qA~UKQ&?H<@1uS;Zj2 zZafPMJhpI!Tmbxw1{O61wO8g#9$_k&6B^r-BLdPqu+R;t{o~Mj=&Mku7P2w$aaWrA z{@U63%(%a~{^DWlYf4KG`yAN*TUV2|;4KkkJ?vnhi_oAfknax~jg zWcx!{Y|(@sNAfyvvh%v5Gr-$}y*viqd2_hU;WH^JxE~4`li~QpN(`RH#61jrRf#!TGv`4a^ZqLxgSi@Z zvJcbVPo``PH9k~)V=6ki#1g#5mg1Y1eD{vspIFS`4fdOv$ftHGZlwRTffny42DZ#w zgJ8xhr={!aqFs={UiEbNLf{WMnlIri+)!uPuXZThKRr(sMhn)6XGTUU{gWs6IzR18(5bm61lHP_JDK+rL9AT=9_(dOz=?nHIUd~xW=me zmqqX&-1pBb!*Ma|je+{%%rda&wf=*^1+t`=ryN$~+_lx{_=IdctXvY~T-*@a`^f`& zB=zQRyMEGvSMR0!WjFah+V`Kv68{nqbU)o+#@9cVumjR3ym;fDS4W;4zs`XsItQ*d zS~&Xqr%`dkf9GxuF$J%d9FX>`KLqvKAu7V(`pT^av=Z|1`Znnkg zSS>$Paei(b`$)$%*?!cbYRa~IUf|67>d@Lwb;z@UcX~w*`EQ;%Rr=tluo3pGJiGbVmi-8saG6dyY4RSxDcA4rQ?8})`@U6N3uAGGom-B6ui>Dm9%(6|!3ovs?t4^w3D&*258oKDS4yZvLhF3^<4Sx3jQ-gqbV%^T1$E|nd;b4siW`*C(=KlS zz^y+(;4Dc0JR_|9>SFe4`W(yvt7TAGx{LH{_9@W z{8HLW?;I%X@|q>t$k+j;1wPNqz0^c>YLB>F=q)H3V#Q(9%O`l34-o(YkFV3CpoaKBXnQ2Oov5F1N2l%%R9SeWinxgR8Bh@I`yr zNR5s%n%0ay!**p2gwB42cf@Gkv)cG--T{Ce$SoGn`>}ranmUS|JMTYt=vl8#HyNsl zvEg?PmqUpj+rmFnUA}xywq&#w?AGC0vv+Syb;lDzbujb*CzKI4kW#a{1Y3HK zIo6sBDCe{WM{(@D0F|x7;fVa!A6D3kEVhhN|DkburXA>MNrq-l&bcHJcbC1o*^45s zyz|hj=ml{HYb0M zLy&XrM<~MwCm81*Onf#@WA0h^V0K%hxPerwTA_;X>@ESm%UQB+>8Q(6p?zmqI?s4RM7^@8hUh-6D<>BD z%m)n(n0-n8)Ky7^FNd}U5Aq8iDgafXeGwnK9>F0?s>SHR|V6jM8J)HRzA-{Eu^!z{1@ za^&XD){#m?CmkBUIhpypcA;uuFlPs( zYZt&?Yq$qk4~0SZjL5t^w=hiEzgr8To)z{5M6rb>Q}q8bhRrfmiP>$ zV-k!*1hR)!?pwPxfpdqvZ~STf+^r>NN>O%t0k9*06M-2Ev!SrFQ?)jV4*Wp_n?eFE ziOAQSTuj;B0ZHdd0YEqiGL-8F5?BLPb(>W^PaAiG=WnU{ZllvRU`s?ke|f%PCTi&b zgl(2VM(ELz^SaLL1hvQU-1o6OB+?Q4kv7_W}_%dzIlH=+fF9mdTp!H z?+HLk=$p3Z#eq&Bgd3=?c~MG@CeG|W9G(-xgwJorewq1=&H?7nIF1rp9%bqFhI!STgjjVYWNJ^~wp z&KiluUWj9TCPm8h;&Z0k9WpQZFXF^6!9Hb|eJIXy;#>hDO!n&vS|9;$&C#w}4M9C( zV89pn{>=kK)VhTGxd;t20TLQ=y$@;@Dvkb?Rl!+n%J2%1;&JUkBfgYcmMZ9Sbj2*c zDhqFk23j!#0}QF;>Q(VW5ZfZraV>jU7~js`#)sH&=mhDw-1Ts|S(~eJzqO88AlI#cbXP98X?3 
z{lej%r@Nq?VHx$fkv^F2a@pv;-IVB#@Cu#?tRxlmd^o!$0A4*kutbU?7H?X8M zyg)}g!$H5PDf9F_M)IM&q#k(8Fv+H0kgfv4>hU*;@e}GW*Hbs2gvOg{M}aDC8A3eM zE|F;c=PL!*L8Rb$HWg7H62vf*)PUO~O|;o*cjisxmKM#(L*{lml=@gI#$~7o4bhEW z%6o2#K{h-0e$AxlZ59_o4nomfq?UiREm!?j#0QlFIF%@Dz@2Tc~TfYG37kv|=DcWoYcYg#vg85qmGra6lS3)k!9 zWlSZiT2=Jv>7=98QiqazR_&IY-u3ZzmCNCxalIQqTNpI9Q5xsq0ai`Ktz6edBG;ZX2^#WNE{!<}J zUw_4UO_@y6AhW*1xsR9{xPvx6z9OQ**le3pyvp3nW9ch0)7Q^Z*<}-;AQ(vfhx#pU z@BqZ8gubdCM3|8T7^HF#c7mr;-H-AM2G{IA8ytfarh2^u#isz0NVOZ|24#KCVNSG_ zl(tJhyg`*cQ zp#TmgA37AGlE7sVcO1q@S$2bRq4w=LZTW-{q0(~avUnz(z`~EZt+Fm@--Mt$v_kw1 z{fvgL&G&e@TUuOxBoS-KD*}(js9LZNh1#REEFx^Vk16g`QnlT|5 z;NuW=q7`#8l^5~~%M|jM2*CB>#l&{l`l3uSI-zZ5J4`I&J!46PRQ|1F$#c z&-Ms*iq?zBovpvk`AHb{>rcvL<&*okI-=a@c*9KANMt7Tsl?X>YQfD9f3&h!m^>wZ z>r!0UN#j?#5R|pB;)Qcw_i6nr_ptSeuMUOqBBUs7vQTJpUKNXTc=P2|r;5jml;U?r zq14!g(sb?t+{Xo*N>sV7S`N&cftu4Gy3iMTE?oB>Y8=e_hJHye2eRVr7A+*%Aq*?7 zgB*mDJzQC6JjKSp+fn^414n+RjpnjsUnPaorDa2J+Xbn(`TD-=o6V4wjWfOp^GuD8 z8lu5rFO@0w#-60)roQ9GtQt{>1TpL6UPfLRLC+FYjZ9O)ED|Zdl^$%)DsogC`b-$eBz0p}Ri%Y;GiwnP zm3?kLk2l4xU;(_m>-dfnk*6Q4Yb%>e!M+wCNi?&5eyWT?0ozgRuz;`y61eGX&)@HTZ{Z=G9| zIEp$(Lc4h45Bl#u?E1CMcc>RsZ#?BHtM!e{aBO)_!VKq5=p$?zTaccD zR8(k}qCJ3l5s36Gxy-y1vWg;^LrjLNjriTje2ZqfW%=C@F%F!8IgbQxhQhh9Lxj4f z-UK{ikc{@vb>+Ejm1oItu%d|b!rzSK z+>vT8vG$7@?=zCv`q_yR2M(Wy=V2z7ER(#j&TXN15=uEt`;Zl@AxaPU8>hXHk&vj; zd(+7%z0jn7=CGI#OX&#GV)hoY(T;mkco{{pvPw)i8Z@-5B;0tUjDiJ|w=XDWIv&di zG_X<*#^^((L{Ku5>WHet#n*lHuH77T0@FF{BZpZkVsD)YJKiajjCz`@?C;cDaKH}m z)-A9WF}Iz@=t2yTGG%qKhgtXcE)NKrAu%x1EdTE$N24-tg)~0!q&gDUznBOD1U{F8RlyJalw9P#WJcU7~WqLK7>vW(#b^Qd#B&3idGvJJsk)o znVV zndwpX+=VZk%J=zl<*}ax=WwBlA2Yiy4~O%mB{!E9&ORaQ$XAJ+oodIFDgOSk>g;#j z44%0)XJv_%p_1!MeFSyqSdQqC(3!};xJ2`NlH+Rc8$(xGgl+Y$VttdHe7vMMk-DUU ztbe$vidNWRHdQZCWlz$dv{akk&O^$i&`DA}kEkceeyhcGt^T}^IB&L^PK=O)&bfoq zs`U{Ecd*rwCBxd9uwum0L6HzaG+(Bi2MQE^-kF97{LTF6N+M+`FEF0 zsB*!xVmcMk=j6!n&VaXVv)lX8`jkx0lLXvNEri-VxmozP^T|tPt3;B*mB zrGAD@$B;u%v}<9m0SqT|(=jXJeA{p1Iu(dc9a)^T?)(ZbdPfZH=&PzyBI56_xn0r0 zpjFLu9Dd6(mE>AS02C%A_yx=84x($dp%79j>^!&W=saRN$&(G2bAYwm?^E2gv}F~~ zFRUWr>Y5raa_5^BOQ<+t;G|WbcJiC^dJoc3i|ITB(=-kZekBKdq))(|ss4kSGO8X2w5Qf7 zwpueW`_s$4Q#Mcb>MLAJ4upYW!bbD^d%meUg+0u2`C~{Lf6X1#8ydrU1ryk_&$dkqYi!1)a{jYh3w|^HCh!reb)<2JY zSW4r3FTR;Yi4dFO{ownx4~)*8)bf|woq`Y-p7;4v>7421v2Kz}OyymW5OJM7LRp8A zHeo*oS4OQ!fr#^kXh(;YIY?WctSPm!2}l+4E6u(#ZRI44SyiHoEuH z6HTymh^TN*JCcn5p)Si$U;jOxFEj?;hj74;oM)PxGj6H6wZ1zHbLb`%7px=cy8y^y z+#|e-x=%{cgE>j!1`%{#Uc46b@~xy9iSJ9g8kU<6&s@a(Z!D_LP(>1CJXA~Ca-k{c zXXsSul|zMU&V;XVQGYBqlKLIKB7A$Cp;PdMuuR6D+%8q%M)=~Urtn*xPS-#99X*4A z2hx&UZY1${PBMdo)mR{1MWoZbL^|UIE9MZcQLo^lvd~Fw8^#SnNkb~hGAxcVOuJ7*;^ahaqQ z)F$V9-V)Q>Qqk2bVzJg%897e>Dh*o=Jc2p8B<){fgHW!pE0q@_|5gA{P=kcjr-exU zPh$GC1|L`6vrhtT&-*EJzo=~P$*v8Il3x#r_+PMQri56MzP~Zrb#ICI71x|54gbdD zRZn6ujah)3FO>*xdv;`B z6A%*a_mC9Tz?cLfwlrYukP}0?H`t2jt{5 z)WcgzIcmmb`A-hz2z_z)0^A+<0)BW}2KC?j6duv?AUvHLBB?UgVUbL%#jSuEOu<=F zC@lsw!k;gM09q*-5THnFd%iDyQ88hgANTTsrsx|Wo>V@S*)({d9f@sr9kLt|k6z*9 zTrGCNhIEISz%zLE zK`5lqSG8f2&iJ4T-xPTHp$atI?m}B+{o#H(Nl%-ed_O1;J0(Z47m6;Ck)jZ?jXN8 z{i9vT+Sk&*x8?G>NzvMuEA56}LN#JKJqzX5tr*|J z*Z?%^Erl(H2Y)8E#wwNxfU}=uDxk~^tx1Sy?1!ud5I{@UCH~dq zyUIA=9h)YA@8BgQ9()1Pqhrt!8;WEG1()1_cf%6Rip!^oqKWoqsxG266XxkJ?WIMw zkje2GJ%}CCRi`zKsPM0m<`94MlLtMg3?RZ4q5u@XM4HrzA@S8E=)BmEfBz%vSaQ>~ z*6hEpmx(OIP zlnG&s{SRJ=aqPQV3e$E?dl?u5cpnmgPyGgPrxUV4`#79bS3fsY{GN%W$LdrgCnLj{ zuSLq^%O%02X!?-J;qWf*9^d`(6lqv2Kn3ItON!UURlR^b*e(sgq3n`o3J5XPV;~Nf zheF7OL4@aH172T1;$8A$K&-5DcbZpavnV;ZB}=Jgzo=hoO>{stGixX{>DQ%rBDf`z 
zD1&SH2ZYWOs~xF5KE1z}^>JlUo0EAIOq73!Sns~kW7eLUjr->XHLZq{PFm0gPZj$E(uelUbAnw^45=$+uoOo1z_`xZFz??`5w=thZfVC4ILr@D*%Q$@YV3^ zo*#peMggQ)kaJ?FaghH7nTc3OBK1lk3XS^yE`V?AsUMn7z2trZfstyI4qBPlFh9=V zJp%%OOlV8?2^Fba0QMwfcL3#*hFAb(B4^1{Oowz{(r+nFI8FYXi=GQeG+3Ob9(FLT z{9`>mS9*Or`Y5JCGy8#4-FhAiD-SUhdp9n+ zwjKXh7nwy}f~f9mnIiRLCVr0Y>$wp^K&4Z9j_zZ?7AFjK25#2vzaSI`scO`U%%Bd#z67L4{ z>>Jv}&wxMB;M49eAXhNpjostx!xHJ77*46?Q162F>sUpFkDplZ6qL4m!LnAJ(e0ub zjQnSThz7Pml8!8oLDhbczdY;!OdK7*Jn-M~6&pT+RpldrvgQd`_?6}N9QlSbehA~9 zgI&&X|ASs5NA9oiFkAkABty2RGb-(DtqT4H=*+fP*fFo3l=-b?GvVSYC$-U};eLR1_ zKuZ@p&a=*GE{$l#DMwv{dVB9G@1qh!&fswK6I4<$cUy|pk42f zf1o}IO;>5pBNbvSB=)W@-#d{2vL81J3XE@ZXwy}c7KH;~hPnUy2)%|yQY#`kis;Lo zQj&r$j`7als&ky)w8X+`#R}~h_#k-1vpSVd$5l^&P-rd-Ods!u6iED!D{7pbm~a0E z?C8%$QG2s`Keo|vJ6=(i7X!m%%Uou6NiWyA04z4+^&3_Uciw?2(Ff0`@4(chFxx*A zn|Mc3W6nSH`}zEH@*|1s5t$r89MW=bT#~jMVEO*egJ^OC3BRPuHlE#|qyeo$ht=Iu z}vRx53Hv9Lw?ykSbP7?=fR z+014#s!>R`$*a@lY}>ne3K(0Y*$dz-wneg>1PN<8x(084Hvu{}O>E!Ku%7au^W+~L zHjtMn$=rH>XH-8w*1c1MZ=3BAG2IccaXV;A9u6yI1uWTT`#F4`@#@DH|8Z~C)IwA` z%a$TGUFuU7IPrV&=`;RDL5$P$s$p)8PmA3LZ#@M#rcBBj)#){Y@c|!2EbsAwSfqi5 z&=hfd+(&o;&|wk1LT(Y#<9D0Dyc2RQg$xFTH$nuDNbaJjrL6sElS$!S7Ua1br$VTK z#tzRja5)oCm#F4$wbK~DWSD2b0X67$Z5ObPkE?$rwZ@(&!c=+cz7=K<$ zn5Tm^;nE1K&po<5P@SmwmjBiml*VS>#c*#V+YfO8YKl5)BNW}m9$i@9ifms97S=X~ zyO<}dCv+O5iirWUJZ{pqH}feJ7=lj_gp<9_vjghEYud##Njomppw{`&z69XBZ=F5? zQ&R6|!;q#3r^@e2Z?Zj+i35L2$x}2m+-R?z@sWq#Vj4FZQu+*J;c^HDKXDulm%T+0LqKxrCMNWdc$nm4$%qcG2mog-4<;?a z^&_R_=&PIFCTu0LTt!om*W1SvFrH)rGxnd5+P7)n!K4fJpbAB0WZS+SPZ`y`<><`m ztE;6mwo44nHM=y$D>{p`BrSwht}w)kScgWP#i%GgXR0L1!>?NW9_^rbzwI}W`}i|_ zZH9b+4#sRH9BudAwg7AQU>nyR7azaRji-Gbv^hE|dNu5?NYWF)m=n1I&V5iL-$P!* zDZ-~PlZfyJsts$<@HoCD2qkkd?FOQGyfvWzJ&uQOO3N{yGoL+HgVv#}-4vRTCyWJF(o6ZldoPxZ;u z<`N#W_MZk|x-0Vc7_U8|f>XEi9A|Q%9`ty6CYETNLV{{T`_nb|G&y$-N1yrd#TR*ehzuM`7NlU24Uth>nujysL^&TVa z$A{EJb%9=L_0mB4?i22EZ;XVpkC^OBr-mm)H7+vu{gfJpn|JBkNE4Zu(X$-j%i8;Z zFEv$yVW0R+LZ#|;t>q&S)DUJHUh0)6HPb6eGL);9H>Wt@=(eFo|gq{#0N2)Q$xvQT!(1xWYIpLJVcOoT^<;p~`)8rx87DP-Z) z9=|2#vM;}reZ-WPUJQAG!)z5B7$_V=lmk%LJUPlp0>n#|8}^btwf0PCZZ8B76AZDW z>E}7*A4${r7^0Nk>_fT>gv!c%RE%X>q8F1!$$N!@0lbtX<`beQ!uuht_gFY zcj%o2vWQq}-}T(EVks~s?pkgiXF4r0It>~p9=h9&g{WO6B;yM>0_Ec+^Agj-mr45# z)Kgw%GAV}nDrMD$1npvia6M22y;1MwX@1HOlJsLYw@7HAF(7;baNLF}#=dntc`ua9 zQBT+=tu>H{{Y~S>l=S~;A!BLLEI!n}xVmyv>V0y>svwz3W8cu*QzPF4TV=UFm`p5` zQ__Vyfw+#IuW$t|v}SRpTR+J|&?P78Jz=pqa~doyX&{VIk)KW9 z4*NlZyh3z~&YVy+hQb8o3&eO^HFmkocFz;>JJNdu9#=Ogz+0kI#Ii62m|4+v)Q2UH zrYoqAwaf0`w&dapC7CC}r8-LKVz7G!I^nxUIts<@hNOEqOtblIqV&>9sXZK6oCLa_ zVf0G+;);~-xl6^~Mz~-|#KPa`s)zB)l(*f_qid8T-)1Yg;%rrL3;p5QZjTOugunE0 zD?u7Bo=b9QjmtA^_+I?VR_uYN^Aufb)Yrw4ll}^z^tR!_Vfbj$k>VNWg~^G@i(W6F zV?HLnGocwg>JK0B^(OHAn{WnBDm((T{VOr$URx@wBAt)iV?aY9*$=cz=j_r6w6w|H zm-QG7-JKXl`R3x?uI zoNbGjo5ef~jY%iOh*T?JHTMQBe<_EkJ&_-9LEpwLUN!98`sbL3p_5R8`%6;mm1hXn`$g~-bbNNP zX;m+lRzjOdl>){*LehS&1U^aw-WjBMx;(D$8xS}^Bl{V-pNJY z^2Pa={rgWJGSl6Mg9KTQ(9)Vx2}vJcRFB6y17W^N*3vEU0!R=w`2$M(7C(eM&I$rr zM4_xp3bm>7wHpb99~I25&jc1L++vdB+=$Ikcob8{LB=hrV#NGrfS7Ccbo6r?f25k4 z5GDS+FEH~PN~jX$Nmw)zW-gEVF7HL-k=_l`T>RX#=FM|-Ur!G9?W%6o_L-)0L=2eo z2=A~rutj&94v9y$pxKqA{H_MmZqh_rk%7vk^1TQ>X*^K{E&_W6DIQ0L+t0(;%0dr5 zhVqu{!f@Nm3usec&FUF@>YS9EYvW@Ud!LT%lJ(>|3}LI7=Zqny2rEge8va316qH+h z?4>L9(Tk7^C<-!3bc`x04f>Tf{(K??G8*JYPiZ!gE?hYc647RXft4C`+BoDCt-tA(XyN#SIkpoLj!MUngWSE4;E3dd)%GTTgP=J#4BwC^fB8J;~y|00KG_c0`L|!Et zH(i5ybhcOU7T?3WvG=SBLw?~n-%H_dCHQ`qhuMW~0(t&vc$;u&5!trAj5gqCS)o5P 
z`9J#U0GSq_%@&<~nAyA7TE}|h&N?nK(JeRPC#ptI7vh=UR@tkKS^2Yeo8v0o59JtCiyZO#zz6Z;b9E(7ZTxFb*oW*YWg*>gVQSKjKv~jw_ZyAEPhii5=a?aw^i%t->6rs1rWp$7KJ5;?fHWND`1TnMP1xTZM z*5_#$d7QOplN%8dmbv#ZBoyFj^z)%i`gU(pttogA?Hd@h`S{a|D>26k^!vOtfG;y7 zNM%YX3;n$`vpB1wI{Rhui*9ymlBSAw&aLW6CI++MtSZmPKGyoEge9`8j;IJr zf$q;AX(*$9$UpZ?cHCZ=MKQSfdp$dWzf6wPr&6bLHFm7<*3a04*ZF?$AcKdpOu%H! zt`f$BT#mtajkn<5VBU$h7?V}%z}hx*0*be|p*O#gLBn`R^(F^4ak@jZ9=vI%Pj5%N z>}Pz&@9{1v-Oe~jW+%|#8OI`F9C~Hf#7dX)poq3NRVHohEVbxYlSfirb14czpMqDg z189+PY5)tbz~w(NmSz9O!|?n9raFCSt{ltl!`LeaHF<)g%pi)>=t* z=nu`mr{*^v7zLd-7%#;?P*&85{*JHG?*~;3ib8M8>!gjm=^Hf0OJG&7(;Bx!?k23l zG{{q8Tbj%&gNvUU)*CWGJ@)lA1gmwRnU_E3SpnNSa4@oC0y?lTalk*Mg%C#1(sA_; zpwqkSOMIp9T{j9&Czz)!>wN{|3tSLV;d0&|FqiKYeD-77OZ<=>6|v?*ESFx^X1@yxPI9y{gRirLb&-I|4tqBC9UDJ+Tw<4^Q=1)~F&Zxj^ zZQahDfmg9G-~s2F7HN4pEwVli;aD#;_m_apP=Sqeo0mK}x>iUdo{U9BXQSgCQ;C4a zD^~)lMSGPzQjQ@h6i$pnWQfroz0zrPH~v#jV+JKbtqij*i!)(H9Blljlj2RdJJ8st zZ>_Snsh)*hvAEjmWM6;$sX>b3228?mKa7SjI>?pD%bv%)wE3=NMYW1%DDxLpb%V}P z1(gG$2;FxP(4-=t6S4Uin~$L_q^~j-y42bQL~fKxKdc>gj~fzyIQ#priO4jMF_77F z{++Q586WnPj$o11r`L{q!}!#>nX#>8_eA+K)W%a84<4>9ZWhmSj{SSVDd33CPFY`@ zl{F>ABvh^r+b7=LP91&)+a>f2Oixe5X``Yyk3dmx4GdP>8#msoTHCfVHugx*u5bmxjA55?40`E*u`OV(pkjuiYRAg;taP} z1x033wfQMk1&!Fv(ut&nm(zbR9uR>DdUn1|mPc2Eh%qYKO!<+i!xymex6;!ePtP7t zKI29!C*eCQdVKc5r`01O{&;NKeOo&8hkoeG40jep_FoP@RvH~P4AE|(7#K_Pp=4U=}m~Q}A5UIkACz)OH-h zK^<#lldk6J{UdshXQ7qUVlQw%)<_!RU1d%tcD?&~;qse`UMUrqlt(Eu=r7&MM(hrqn z5NP#gWJWHnZh>!)zWY|9vY>8kh8jdR^@EEv@=9c|p%Fs1Ohb*8LBPpM;SZ;R_tUfY ze_noor7}GOKC9pL!l(W*fgg>koo4`SVKrz^oCSS5nj_>ubf9iK1ZhWU`VD7BIqUx} zJq{-NfL{QxoD(5DgiI7n4Xw7YNr+;yis*;VZ8?T0L!OfWpxL*p1~et${nOaj1k|mj zA6$MnxZ_#SGR!)Bd0zt^ldv2Dka{sw$IM`ps`uWr%Ryd(H13#758QIH8Wwof{wXnE zClKlTZoJ8a07Sxyd>q#n`JW)%>j|W!UIngEH3qXSW&SSv$WP_`DBgDe)3bq1_4-}}IS?0;ncal`>IJ#oj?hp#{J z)j77uqM^gwEl;7`NWMfeqCdCm9{6~}B*di%=f58f{x2RCT10>t!hROM#=VTZH(MfM z19?p#Q#ku<1;8O~8Ga>J03=GU0vG`9?122X-d2lzQ4fdlt16vDO!Pv#6*-5+M32+| zGPf?6IsC^1ClGxi8r1^XLRP^PQl~5j@B;nM3kuc_D>3?*k-LUFkYNac$VB0uEYDjr zK3}2ntNp5M*~1WKnhhQRJgEW#yyd^*UtOd=pONik7+7;*S^f!rz4~3qwL6MGak=d+ zMFfTrXIXCshL%2E;aqt*wO4P*{HJmH_c<>x?LrPV(bM((Oei)Ia>(gO*kRY70+W&x zJ_z7+ixa6(k)o}cg)(OZ!S3Z=0Vsrs-&A<*zsdewK?)f}S}k7ThGZ(ZJq3y^S8>sS zHpj3r;P?xjkOT>Y-LED9LKpKJ2gjdv@y{>#{p{y9=Jw>1X}V+or|JFND> zZ2|tX@u*4)0&jQx=j@L^Za%U%Zub1RiH{lW(bj8M3W5?h!tPy#EXueHcpp@Y2O*C9 z7?eC|a#UP3^TA>FUF z!zO`mOqkm!Oyn5wh|@0L#lMWiE@J%ovw!~zAxJ9Kf2|Kp!b9>+?Fok;SaZsu7)MtK zw*%wF1NJHlB|o|llaz@A+L9Ea*UJE>K?a%sPMdt6{pZ`S2KlIr7aVw0R(QcgwyWBS z)tJpkT}y4iu*Nc%kH$PoVgLPZ3N1{q3~flkhdJ#5n3fC*$}IyAPTKlokNZ!^OcpKt z3&<;2&Q=BAj#rGrbzKXMNwo9^?C5RXDb*x7G`Qh3=VFpKn>8MNE} z1VxrP=xeo(Bv;=agIoOPW7Kb<%YM_e?NY8#rVC<|+)(ZKF;Ni%l^wg!@+~a#=c3=4 zK@a5LFKww|Y{_jYdKtPZ2565M5YkZOgM9Gn&bv}y?szi3J9oc12?m-F|bw|LMK65wRwKhKZ7Tr&Uu!aoZL6RofrUu^yL;_w444YN9vy=c7~ zP^7UK&CgJ_P#iG^bHaYSKbN(R9<=*<3%$5T@V^)O_d{PxqD$Czyf`G}<$D6G{T(8d zqqNmEjDP;9sT>h3*h@}a>LtzJtNfo^B_j>{J$G^TcyFxvyozuT+%$-=>%+BcDH8h6 zg$pXte@z$cWNEsNsf3OeV?tnX3`l)`;=`!B|M{%T8$}6JV?M7%KoWYisQc;-NY#KE z+n>GInmgx~To*!2z!Tf0|K1=OdCazuY6h-7D|+627`+YXGhUHHcB3-jB7a*r<9p4i z;O|BL^B7ET5Y|9hc=_>S93LM*;jQM-+DR<`&)Rz}4t=b6!y;0~bn=gz9`YWJ+r09R zhO#$mF3x}97C8RDJ{m-RL<736rUo3AG0+j;A$`7=p#4sFnBni8e@6q>$Y@D#URz3V z3m~*Vx0RJaXY4T~Wsv~ZrVeOVSG5bM6!`mB{{J^VN`_vJT3ta{nQ1dJ5Sts?Zr&r$ zpDmKSqEPNcjwfla{r9>%Zv>UFt z1L~2-6&MjB{QoSy3{b|r&Q6Svw;m-xGpT?KpJl*}Q&>0$CHOAL8a6w{-o*aT(&nv( z0av&dJ0x!_b9^kw_4oTSZbP4RyC6SH<~25^&vz&mSaO>3^Z#8!|E>=3$cNE_a)L2C zQkFNrRcUwq_n!Rc8~=U*g(s#jqwUpacy=~ z@uUyrrT5NBlJzHqUK--UC!0gY+C$({C|ogpQLjH%vwMC05ZS&aW6BEy)Z=fhJ;-HL 
[git binary patch payload (base85-encoded) — binary data not reproduced]
zl?K9k!>npkTLGV{p`C9_WlXHkQIiqP$2S+k!xi%*jTe#6Z= zCa#==X@T&8@Pj)ry=eOO!1$WyOQ=7vULP?fbsq$H7W#oLLJVXQy2?r;-tVSYHSyG=!2vgv{e6rt7wd%D}pWA@1 z2zyen?^Uhx+ZwlqYx51HTcf?#7Rw@g!r>qZwmcX5&6E<(?=_a7RL6uYhG>RTA({4- zPHW_!Mc`1%yetqCKUs7klo~}vuERv0gN^I2Z+|{U7+L6{BCP1%0tWoFR6ux-WAfUU zjvMtlaoB%iBYIbPjbUbJ@b0f_qGOuVF*l$rPdZQAkIdi1R8PbWZ^JzAXkXymp|6~v z>+AFQqO%Ynnv;x|1*V<*3)&#xPCz*F;=BAlBM+hYQhOH}V$$QIx(|V`!*Qz`Et-nq zgO#P$JMwMGVL%!N_>FKd)r38-b~geYoip=fuXnvDsaP46!0a)k3G*kxwVxtRaBgHOzPxY4F|Ehxhtc) zK1xlT+OTLDn$#XOMeu!}n*LS(X6{oa1KBjTSF9II%|VDtZ}8U?*B>0aVL6zkmdrlg zcI?32*z%!lWKa|){Vl-CEm@2khPx$8h%bkoSO2T` z#}v)k(vKIscWB7|@T%|A?xA=nssgkzikue!(Ivk-@F0NIEtQ879oNGjZbex`Gk3;2 z|7Zx?K0bP(!KDCf8Q~rEV53n+_KHPy{N@$KNq<$5HX|MFd>#1{4%+=W5W59QP~uY9 zg#l9%$H*Q|q8;M4H{y0iJ6Mn=C;}fRHSm(DF84*K8>0vy{MY#NiToxOW3CI>!ky+@ zYPH3pYqp0elKfiJwn4*#pb|A&vR3GqbhTNOw;-6(M_TLh82xHVYVR-DH2v3aXYHy+ zrhno%@b##-3S@=%Ql1^r9l?p0&l|CFz@qKny1m!)!0BdPF7fsHT&z zv)$v4F9{_u$6QF=%rp>YU*FUCy1D=pt9}Frc0bVGn82~vqSX^d^4NPX%MHCT?57mb zdXDQ^)|%xV8gnh3+q>q7ojt=V)&q`lQpY>xZ0`s8;|y+6Z|ir&j%KWTX2pl~{4HfS z*s@ku#adRt6oaeYuM{Iox4Gafq-B-!x`$*7{wcn!9m4(25cwNGb=2G!;0_|#vIo-RaMsOntj_~;ybSKC+O zR9TAlfvF^WSOwnnyEe=+ED><7tSQ-mCBn`G!F+`5L&XRB zPex@#$a7@&rXwyl)cP_rb=`OV-i<7w~ zB__^T!(>$NjDPSNqi&7%tguMlwn%?~68@%eNx@&dZsD|K)#fghfHOu6-VCM0avmrQ z@{u>i2YwAQ~`t&32&bLAGcH ztbb!&fAq_IEET25V%_mBM0gX9P-!$J+=^zK%KpyYFo5jAl&-zH=qwgzo{Cbp1MX?6 zu7gCtNOJo@;`bCzNy=#Lr{d4E97$pU%PdfW$APfrqg0pDeQyKkObe z@&d;RNgbxpPGP6e0FvLgk@rhq`Dy}=nYkI%+L)PEl3$>okXT!ncB#*h3= zRUpuxfBzu=dL=)Z(!;>J?j-jTPH8*J!R6KIAjb}&?6vTUt-S|GdIg-w#0AuA@bY8d z)w=Xvw)LpNQvy$ewZq7Z^)`A7Z8Bn0ou?$PW8csthJh7+f}_mhR;_M0m=waQC$__? z!G6+6F}Vq7L}@KR4^2^&qlAw8Z@P4w7(KG<4||2T*yTAdcO18n1?~j1 zg%^)P5bbL$i1~~lz5)DJaX9UhvZi?+jp}8D_0TGwee~tLjD@~=#5%NAXudVJ4R{K< z1jSpu48_YRv6K@|b{@<~re`@?mHWO&Vl;`8wN@r;SE@WjMQV?8Jc{Jc3phtv&h~b< zK9pnd$6;#pIg*K?~oHZA_cyu@1UHEEVlk#$I{i->rzglnov%Yk2Bs5{Px z2){2gXB^|Whrr*T`LM(j6IfMSFt6_CFq==nvSeF1Fs&q+Cf^BS_C>qJloc(Cur55A z(C?w>+|YXp3M`WExnNMfcg=q|`ssY03IxOK1v=_KYZ;-oJ`}j>%JDz_w7%9!FV)34 zol-+?<^NUJWESCUI(^NfxwMHxD$7<}ocZCPemtv;)fvHFQhN0FR3;s9w)>A7yl3wnyG?9Q-br#Zo2Gtj zwLqPKvltfmX6X4k9_7t8f8>^~c%s9Eu{8Q(SA$)PAr4;FkOg;Y)h<)EAm!=;Gm$|lOE;-|Po~bHa zr}#7OwxFc<2fv`)A@W~Wc47*?g-AdO_~I%N(^4^rZOAcw2O1o#fl|1CY`>kFp5N=^ zcrRYKcA?PZS{Y44=uU97d3HhV=R+FKvNmi4B;K3Es}dc=^Q?@zcOSG0RygJt+kJfC z*uudY6DPd|L^DnrolK5%ZoU||aH_n;x&+I6Q2xtX+J7Nkn?P`!{hIj!FJpoG3u02s zM-`Y-zLx4jiJs!_29Qkq#jkRz`APBJ^E4oDQ&fIknMDUklw(`J)O02NJ`$5 zAzvY3`hJe$e=wUMes%fT?2vq(uQ_IEF@0RWnhts61S@{m`K@>`ZZ(bd4w%wDE`O8< zaE>Ee@%v~z4*KDl5@d)QcQnOkKoV3!OXtvPvK#6mgiUZ9o|y&QUku<7C+`Ykh@Ru5 zOOrG{8DOn5V%kTz7oyR)q$j%D>$LFoO?VbPQ{4`!JAd~&Cqe#O=kr!BqlabO?g3@m zEXVRfa+`7i|4w=*btVx+45<$%gWA+#%&*7B>rkuzcg&|zan`0kAA1yaY<91G>S6g#44NbHj*?z(;%+$U@!l=p8UXnkC)uE}^&^|$Q zB7YT)>z<{Jc;8g7c1F#y8S(A<=(spp+DPiqep%)H4rhCkquOhDh`aB-`(nCmdN?I{ zQrcXrhfCF8Cw(GP%ToZAmkObN<)k(%%P1&1HN$mWM^1ui(lk?SFJrx<;c5nOi*dRe zWjiQfI*aW_;f$k%ja>E zdtb>Pxx?h(=Y~Y;>I_&K<)J$g%$noI9WaZ3lw(+1h3U?IYRO3oqmt+AKNL0b%bTPRD;MDoU{c9&@iO5Y_%qBpB)Vx^9|% zbc{)#SxZcn2{2aC3xp%{RYZ61R*`!H8XD=v_Mc~};v;nu4Tx%Mi-yl|z>eC3m|6r1 z>Dpwb;3B^CUFm~2V3W+R+_;U_azNBNrUoU7Fe@f`NVn$0=eTd6RFJ}$io>MvS z9&D-)N!b61{jx4!ZHK=+NAtn}{utJ1N3 zbq`F~G>Z?9?XQpf#a!f|;6$@cozzB5|} zv8>?76v17iq)dVjsTKphNk&PovPb-~{``j{OG9Gr$}kjm{=9=*J<;(j?~%E9Szy5Z zc8uuaAxrhJ9xU}n<3>B1d{k?sng8nC#=C_5Wax=cs*x48uFx8nK*w-d8B0npYJ zGKq(k0|G2(2kglEj&;DwDuJWO%fk=ut0RFWyPBuUEDI?-`4VGJPSf!tZm5U{*cAZJ zq3gGq%D?r9+7A!4_!GU|P zU@y;FfZh5LTs{KNbyGT}D>859-_s6JKWFQu{j604!n=>I<8=gvQ}AyiD`8M=b1)g> zn34}WB34_z;YNRy2A%q8`EtV=rdd~o`t)Egr{~DRk~fwTcp7X9rn=*x+0=z=6eoYTdYm}3Y^r3bG 
zcwTUAAln?!&e2mJnEQcnSNRW9X0{#}24VCnigB-fNNNoeCbzA^&)N|bJpIuJqRx^+ zw?Hl3qpL`;SCsaU_?Y%&M?H`AWJu$=6KYz`c@)#A4S-_oyD#UM?YA3Km7PC|2Rzo6 zG8q1`F`_z3Yz!Io72}3Qhvyzcb~@)im@yYb*8F>`P#;2Yq7s$r8hLYG&6snJeheC* z)W7y&Nj}V3tr`6NW0f+|Dw~+;YPi5-z94letJ}3G%=v1AAZ1pBQlZ_McEIq>X zp-}I^_e7hlRYk?a-H5Rg5Qp#9acr{7uiamwFuc(Cf=Lk?o7yH_obcSk z6HZ3)JilOOie~AZoDV(RnFa9ustv_V|CVn4*^?wJd~f`{kDiZ%p=9J7v9qKX0ja%S zt1F{`Uz!Ldj~T`=146b&$`RE9p5r;J{bkuXe2G?c2;3br(=^%wFLCfYFDiM+Y^qI9 z{TkC6xas<@HaU6ur6%d+iQm4b?`+3)>HbClE8?M5oL8MrUH!Sjs%H?{wHwm#&M&~s zxutMqyppr~O?r=*CyS|Pz<)jCsXrrk*ZsE(+?ky(%45Dh$I2h^*YiZ#*>wJ_l=e0L z&C|vNZF|Y&&Hl>eIDC|={#0wfaeve0{Ki!bByY5pJ!G;tx9c)W{ zOcc|#(5>$}{>zkTb6`+DqIDah9>o>G85(+T`vX^1m}@V=IWfP+)x$C%JyNl%Xzszh z4p^gBMVWZ9MLK1SdU?`?;qEwAQN^uNATB1;dEy&J%vw^t#;uAwA9i2fGdK?2X{hJJ zJIy^kK@Mj;u!SA^o}GwXvhBaW(THS z#~-tq^2Ru7ZZa6<&gkiEuG7FTew5tVFsPT&5zBVSq^jtJ%zJlk^z(6PB>bz-inv33 z)*ZDYPIibkzCYeyW85xt7UmD-4(pbQFym4&o{WD8apICK-;BH!_m4C1oT;^y;i4t6 zfPH9^>w$oNCgJo|ZgkvXjDjWVcI|Z!H5l(0t0i|rnn=FNA%BT;P+?-BWo%gU-{NIy zBhk4{^}OisUoW2NWgS@q7GoLSXf(;Js+id|u&%I<_6vR4rzCxtCB=guP2*T;i{rl1 z?By()?U2l%HC@L!$nPNP2a!6fbA0LI6pUYsdii4`vEb%z#b|Lw$ zAHNHXZ}XMMAWY$_-;-3V>N|; zm>&V7cVLp6)qFIImW@B8T&QsxVB3td>cD}N?M{d7+W)8-4w(?|EaK-zwduFG9WaTt4UNLrIb^h5jYsAi2+I*{3H4syun`QUV%=Dxp{6L_&PvWV=X zC;JU7j3AnNBeSfrry-Mrc9MIRvLh`U114cob!{cUzUf*_u0tc^OgBNtUsM~L?mlvD zD^y`_sV)k#g1=q%hcowl9o<$NV}kK!Bp1hV>%!y^!wQ+*8@qM?aEaf@HQn*XGejoH zUFUU{#+e^tcHRGVOs7?xY~F&~aA^O_V|)&L)YYm`c-=KYUoKOlzn(9=V4IyFvhKDK zrWKTkutCHfUfb=LI#O8t%5pPg80v2HS?viaJ_hBzmog{kP8+MZx{=Xt1T%NCKb}|2 zF(c({Yo%-aVGe(mA7eDZD{?xV(19q~Jl~~GZPXT(+7-6Ch#VCu-~!-7$*kbg$PlQp zYNMgtdDmnyl;VrWwo&Vr&@N1NCNL{rWeaRh^D1%g%qLqCFT>7$>r8R*NSgM=f%!N& z7P47kK85A>1ioyAJ{rftQ4L4k<&1)P_Pv|Ff0$}1!!&|a0Ou_8Z`d$QPs1BYfj*Dc zhK%9uNx0wV@40^70LQ5l0AZ%_&_5C9%#rS(#vGjRzuOb4rxNp36WMn-Js{l^A|Gmd=)9RG; zeHQP737x00y4`d6KDaN7LUb<=I;%DN2BUjFR!Jmbsi@%br=IiQbW}CDeminFM{4r@ z33E<+@SRfaF2(mgU=l1TXk(53oUgZI&+RL4b=|>+(Us%==>6BQ?N8I55R+F2>#C^Y z>=TMeUh{&%={&Oi=IQrur!{EwCa?E|5cy5bjNUn>rYe`FU>nP))eS12zlkdQXXsXw zqLMg038b(x)qze)*=p!fp z8Or``7*ks!i@D43!mlGPO+YqQM?~JE;O!={WOTWL70}MF7o~jYrmhlXM00TdJ0D__ zUcMN@{Lq0vZM8^v{F`GY0RGj6``**mycyZb7Ym7O%!p-!Yw^2E2E9AmygFVHiSa0# zokO%tRr(H6M)fL$3b4id0SNWfYSLibk1l#}dg`}F>Q-vvg zp)4#S5DfIy(=W1u<>RaVtVSlz1`{El1gBy4MH0pzK&VZyWlb$2(J!8uY9aNakyBx@ z?@xu*flu~oni%J5yWe2i2L?3TBu9L^CWhpv(-XA(A@Gc@rAQ$V*sCZ#mlY&X=>9>t z_=>>onvjw?2hw`r8iy!$gm!#ZQzJt}XN)UBA#6w^7WUE)K=`!B$&qR(XcBkXCui3M zZZ>27?H{Ynd;($G1SBW8_e;_zFohkq9P(t#lgd-#Vd2;;%A@KR}~_V6M9&Lt~&EeyjKU93K0e=q)=N|N{U<(AB;_VjCW^^wz*OY?b(N`@zGwVA5; z;y?+<^!`^Dvu8#}vt~d(@AA8ik^z4~blrQSz=9lU&64qRF13s!SANm!u1_4c>ciLW+_N|0C8ly3STF*Ke$Enq4snTs5HkX6rGIdRRCbiEG zt6QMsm2V%2x(WiTQ(tx4>z~?f^^-^{EJAgH196xMb&PJ;iH@+IBdaZUrz8Hy)mcVG z6}Rttf{<2eNu{Mhx&{>xNkO`0=%Kp@5D|d^X{13wq+5Dukd_>}ySq8V`;T?bS^ERu zSc}-RfBSi^`?{a|X5X7|J5@WpOI}UuaQ@HB09!#T;~M}VsQPIz1CJ)TqL(4XCvOfw z+t2qG+1j?TG|=c%9L8VlpesdPE@HS~mC4aLO;=R>Q2r@4b?CmsZyhqWyn2Z&iII1;>7~U&D4?3TM{V_b8 zVS&k*d%coUiciK47?EOtDC0<|OX<{(Q6S`B4~gjsT@_!$hrz8@G`yK{bf{|Ups8T{ z??Es<#YGo^O%4NI93atY67l#e!88?reQJ+auc`{tLE+u7r>d0!@U3ienkl<1=|Twd}w=UF+6S^qbii_bn4fQ&X3;^in? z$-)lfggT#(sH~g3gVdSq_WN1(CcspW@>)#Mo6M4%_je{S_B1X>EQk02woaQ7XTQ7) zRp9G&_>85Hk2$?i(_u++bo_7n`Z^zTNw))$sqjH-efoMYn;#l|?gL{&exKqd<<0ZH z3BTwfcM)ECm%`aur2zg~WPlpqBi5S0ON#!;0K~E6WT*W)*V*euReKLM? 
z9FPC7{it(<_h(wbijQZRQpz}8LQ+raS6+rX@ql`GVam!DU3*mZOd{a~8FG9hD2739 zvqljft#4U9?PtWT8}{s(fV?v7IjF_bK^C@-W13lDC9dTsT;nk}(IoE_V#?2aV4KhusBQdw7!3AwnD(R!<&my?#lW)628v}E*byum;!~C^c zvopt2DI5B?M+WF^^nYIt^y}E|_N!=SB2^eL2}Xo=?C3A$eM{Q016GInw+noF*7U@G z!i)vcCqJ)%b&%}wYb#yze%zud`C?GeMeqDF{ZzDIfxvh7cg0Olv0o{LgCL7uMk*(swmb!zLSJ2iGLv-goc)?Q3=f#=o)Y3C2kRp>7Uvy7Y>L`RN?MY92x1Dl)( zP+_yodwn=YhbHFh>J4mtSv2h}bBO^FEKu9)`(T=k-eqGd*cn)fU*?Q$tHJxchdDeh z;H>NHIxb4gsF%!UkoSr3VAmyWnjBE^@c`t-=g&`uu2#AIMFL6*gAz8q9ezYuR*jOK z0PcQffxS;V+U0uSaEuyUET>POP9H~2YW-4jnQ}1FTF<0Qm@6srpKHYpKcgmJF13q! z=BKU07taN$RavI*eZ?p4yHtZSkLs)@`yL=eGFa0e@+9uVLZ6VbLRAz_!BFw@4&f7; zu%^?HA&ot@|GoSE>n@zsCp;j(y!rmbX4-EDKQ{b@AYi=ihLf@t!E%pXT?;jQAFEo~ zwZH1FwM^@OJYi-5v>ZfNPIuZ~CinKA{%jB>G+>2+wF}Ovwyhh6S0#BXXCo0*7+K#Y zdNkB??M^*y{cR>bfl)8AmpsH&%tRj+F9>S-3J0EQt=z(FWn(qOy@Pl-&GbN`d5DSj z(`t@fndpfZS1Yxp_+hRNWEQ5qXK}mA=?jLwyFD*9IBfl*9)9UaemN?D7|;^q>79zj39*leG+lpxdB*eVn+gZ5yU;u^rcwqTa0yob=6_ z_}0u}iO7;%Om!?Pr(&-gJL|llQi7tM&^S^w@f$f?2t*t<{FD8kJK%r6BC~{%_wH;E z{zF~;vE`X|2%JZrmWw`lhu!b8#mWh9Aws42YXlrua?BH*Xj4DSnJkP=?Evxt-jQ;9 zCh>3>7rx~*P6(Ge%!Klk`2a25<9%#D*>gsDZWc3$EPQmJ6eE~aJF@8txkupU zXkU%56RYCgoH<8Wbd((P%gi22}s*2|yOH)|Uh&5`{KyX>0p8Yq8cEn+*2%y0Az-`*MA-bFNB zjhaI@-K;t!E?U`K6yb^%CyNBS45p};u|*tQkh+Q}ii9F7x8p1ix%hyJxz6szw_pan z2gv3By^ze#@aEm6`ImDV$a_RsB-i)$yq}nk1pX8b?9Z_+w>jRdkNi!Ei(+Vs`o7wc zkC`zG-K0>i|AYUI-H@0Z6x{h$q$goV>QIsr{4(^o%hKD!{(<$V5P`5Pd_ufBxqHEl z>D~u~y_e;rG?z2cpHo_C6oZeJ@e(G6aky~f#iH;SM0q48k|bx*vs$aJ@`S{e?)N&} zgngJpcW48rf?jLU&mS8Grc?tAk>Y%2O`wnH2x_N&9zx$7F&VkfmJ0)rP}%@<)kbB*5WScQ(~fx=&M>OOtj-&(c> zly5|H+Q@{K4aIYQ1onBgkQu(zjIFfko85nwO=Ro49tA>(WI$KL&>A*w_^IuA{5&r9 z|9=SvgKIA`2p_-G{q9-t)$`)-Q|AY}Cy8@+Vcg>-4<32e??v@&;;b%+HlSlsqC?*c ztFAqHd5uF5KerZF55K8XZDkcJVQI-ue(Hpwp*?Ur^8O-{#prr!;;A9YpsP;^haLq^ zAD*&WP1^_eZWhr6-ZcA`Wi)pCW9H#~%CM~jI@W|XL*p>$y&C8He3$AqXbc~JtCZ+d z2d^--mx#im?Ru*1TZ(IPT2ll#9%?Do!*R2$s9Be(wmmg;(c(8MXzst#n?}g~L3j3z zY+*0fSZ@&gwn+0_TS2cTRp3t4YkR-2$aCIICt{XbVs=i0f!Uhx6=O*d<%{xF4y8Y< zoeQCetJ#3z<|FpzXw1mNufVd19%uOPt1a(oU6|D2(W@@tB=aeHDx6ouYN!VIv)YXo zEtzc!(bpb-RzY|6G4fMOu?sP~+XrC+mMQ(XK;6YhnO8OV21x$81JIBWzDC@e)%}MP z`2T&Swg(&pGo+Ok>;~d-B?`|!DCiW$1|Nw0P(_YA=l{@95He0rVtcw2+A0${7|^n7 z$(IvG}RhYk}cWVScu;%q~-wHn>2cc z{*R45ppsKgLsMok=wc&^Za^YLVPSsU_f;vK!iXPZnHDSZ%=zlJ?MatOE|U- z_@e1<_WCcf(&;9tmhokuB`hAt){`ZPnbU`i{0%7I^{<#53TM_PoHj3 zP6zK7GhaVh9+o}in1xx_7xf=an>*5<2+6s2YwrookFCEr8nsZiv38$rvR@wN(A^gQ zp)tQ-aWOaEDPFlYwb~UiYrZE^yMGaT3_U+7GqSBN?T>f!#S*c!VVSr;n!WZ5eoJA` zk&yU&?Nc1X-T$}%7OoTA1xYs1Bo=^4uLA{wJQX3HczAS%QuE9U6Mobbp6Nza@({~A z^Kd0C@i4BB(0cEbvGG1}`{$~>-AX(;i4GE^pLEw&BDy{spgQpJo)MGmy;jMOSw|Yf zHF;hJ#SaM&el3kKD2xt1##q>Xkzt5sZX8Xp&3F}0=Ep44fl!S8HP6EC!j)E-$TCQG^LBia=HQ3KL{nSpsXlMBsHUQ9eegKmBoh;M9b3&ff zPchi^1;?6|@|RL*{PU-uH;_VgLKyfqJJIJ5GZoF)4of%HJ!3{lP*yw!tlX8{UzNNS zGc11TfNMB#RcYL^AjGNPSJ!;gG)GPEUylEpoD8#>ZsZS!#2R#>@Ts3%ahhawW@5#*>&45!Q9#1)=Ien^c`BIu-7&Mg%_j=Y zv1>OPFzH-|oAj&6On%FvGt=?;&H#J96~pt%&7k<=qitH#EINbzP{}`C$6tn?*?yEd zO`YdeAS*ZzzZ9BgrZnw7o>ic&KX}~y30oK8hg2Gv=i4u<(3qw=zaEgezvyu3SQ2lZ zkeJeUX%VVZq8n;mb3YX$SJIn5>!*UCKz(PcGgsr0o?L4)8tU&^mYHPH3UvRNwI+>` zk7-82JVe-i{x=?Y`Url96M%R}@9?Dn&*S}#fPJJMbhO$;=Z$56j3NEDXANq4Q{)sz+b-lIfD~_%#lgZ*QAi;9OKjvJg46`p!on&vnG9cb1H7cF5 z$?F@WS}4a=h$)g#iw@L;8w_Ind*Xsf?}-@6^ChJ8tL?$8>96rT1yla}t@WU$# zERFOxh75oSsd1hGfgwu@3<2MxJPL+eg(;GQIzDCjUncz zPkQV-CmLpQep!4Ntf(h@l{_E+q~yvc*?)_^x~8AqOLT(DE@n`u7Wv2y7n2t z-gKV%-^uKI5hAb7#T>e*W4*uZ!)sdTDvyH0N!O0?ElN4a)feVH-rW zpl*F>do8cgu4ZZ?^Qy(U1*0L}ZQABz{~wU!>g!oKyX&*EBBi6ryzoodpl!L7?Q#It z8R*wk@cz`2sg}Y_9_Q`~qU96IO?>wElHT!Qv+CqxjP8wJoPI~+CuWtkZUrv*m(wY* 
zCxgxg(NutOSe4(Fo^Ogz0yuLzZJ*X>-0h&C$nUa940}VWUY#y)k273rWHKwnmokWU zcUYuuafI6B!{AcWrBI%VZR3-4FO~lBkLeth_+xD69h&xTAN|oN*d!r!hPn%wu#NkA z7i|u7*$K#|y}TUw;<^J!a>oGH58NR?Y_K=J$}2m4&msDrVkN!SddI(Zpq7P?chflfgOUPnkdH}-PW(FyA`HO^l zcF81_BP^_^id|q2?|e5UR#R!I!sh?#9RY2gPIH zz(D~=`M7f$mZ+!MU?V^!$)p5AU;%$ABB2;(Mz|6mPXB)1?`kAn*$-_vm8i<6OVKu6 zJic5{SAiE*i-YL^ZU2^T`yr41wnho}p8hTJzek;Lb1FH>RSqs%+{l)hR9(&0JYd8)Zn{1_Hmyr*k>inFf5JDG2&pik%DK)LRfs zC`#uN7eahnNGbZYO*6fC{mWuzG0ytg@r{3;sv8b6>lG-Z59Y+V0ym%Bh}P zP2V|f|BB1BU~Z3wovh|HrmIoSA^vb*W+yn}_YT#wLhU0hD)rWXdqK3(L1s9e85JQa zIV=|0-Xjlb>}@W?Ra#R zMWOI2n6v-XVqAO0a>XBarU%A`PEgqHFW)21S9}y!f_=tTnT_gi_3E-yOS?{j@O>@4 zKBiWD+59YX%f;7`XL8Mif$H3SC$@{vwlYuncra%oImQR4|H{UF$Z@|x3sv&8H5Pq9xFWvHcDIWTbqC#zmNnM<#7dq2 z@KDeIpv5iLqXbGACU&@N$(mUy+nURFVU8tE&(C+dsyIR$pyjQF&(A`{GV- zS-*tFgO%KW;HFjJf!@{;{3@f8K0|2eD)Wn-Vf!CoXK!i{b55^%^YzS4n;1)Gtizf+ z7@Q+T;HjqAB&R@isCMY(7sJwo(@S`2Hn&J}Cc)`;vsGAXQjX~!k?2$25&tdQj`Nd& zKoTl6jaXX<>ZZ*uHR9_wo!cDA<<0HlEjHK_U>RCurnPCaUz0q2Ex6?E>}l7e9;t3X zpjOqE9(sILX%fC6iaj4DNz)7cez|aB#!l9Fe?WB4(?4veKfHErS)lM-FYOd<*2$X{ zhLO~DSDtZ}_kOWnLYvjz>J7VJLBG4J<6ZKv)t1?Aq1m)&1JQX~w_8iz8p^+$C+-Ck zqV|hVx6i#b=RJ$(WJm5%Z;yr<#T@CV5-Yy@u?TV9l^fVc;|zt>Kw}YqeEi3CEC|Yp zr9)J%)Lm)Bx+MyF$D>J1*alObZIZhS&E9oK$cwekd}Y0avmvXLaI>n>{Rq+1{epALu#~qn z=}p;Gdlo_)U?DsWg{h(+R-N1+hply}2t;*yaczp3x&x2wL|5 zYE1Kwy0c;Y7^+DK?Rt$=-{t1Yl-fK2E3G07JhBVh;t0<3Kww#-_OZD&liqgYIT zq!z)V3m8%Am=wEf*&;XQ>;U^jGQR|DLg6-BW)|wxT zHtT<7-EsKJVC%6p9lEPagVT%pW$TR;J36cT9k)Ua4pk5Rk5=D_g?YGP1A~0%1V8sn zpi}8Fz6r6!WqJrhH-aK)oW51>I)~xP)Dg|B17qYLLBLdQ-RgB~fYKVE(JbM?iTO$h z$<(KClDA@p&xtn142#f~b;B;6rHs zPtgvYreE24#9a7{=PrfvwY&$;NwGn_T88)vr^^QR+0@a*M^;47U$f{7w3|J8pjKeG z)=8$G*XjpcB)~)Pvn*lKGOlo>CFakE=&XFG2xv+Om^I>jJ3r1TO_?($l+^LiB%fu( z{HrPPZA7M^52yx6fP`Xh;4)(V`Mvy?bdUs}0&m&$sgU1?A*N*#)G3hPQ9L<-X?y38Iq{-?@A+Ddp(kvp%wq8cwtuR{#c4m^j&gLzm7w3F)u4n z6=AdND<|0b?ELW8R7(6t0?xEdiJ6ZW`*bge^z4zb>GP1i-$w7#z|_w}&h`49PJUN# zgM2|Z4i7V}rO!A|4yfT`J5d|I*{tquEmx3@-S59Av%}mVt7&Y=QgLjS8M8?Dbz6!+ z`cT_O$sswUkO=De{&G&4FV_xDqv(mi5uvsdj*scK;6ljY<}EJS4v{i{-!7}^bTS#q zR9X>%al$Oy63H=!RP>F4TNKm@?Fh?(;@+8({>f7LHh3X+?k@>Db+`5A8miVE1z#QM zIm=M(RMNsuuLOnd!E;U*Teb%@PmR+o-a{zx5$F)=P>M+M;_*juUG&P?E$_Q@!Lk- z7Kpw7tYseNr6v1~z?(`WYSQWosw{%W1X=3c_m%yx5e8Tb|A-SQQe+hWjWtyr{+%(q zGe#fNsvNCn=CQz);hYWx7hM+vKVE%_R-$Ow+&i1LSCPZ%1~3$U?y$#DEd{bV>*Vb5 z4QI+p5aUcKjWXXc->|nLYJ3 znJ!m^DOFPh938|Ed*3DX1Td58QK?8m+2E<($76yBWe7m1(l2Z4&A7PG(FBvwY8Fpa5MfHw`tpa!XS*% z)mQBzHDlLb?jr1N&qIwMi@AaB<$)r*CcjRGtB2Gy2Lm2Kx>b!FOio9+6F^t*1|Q04 zD@)%XFmtMhhivuI3PkJmx5lrnR{GpD0>neU7C9l#*Co$uJ0WI`-XDKWhf*&zwgeBEQ?dbAaJbm8!`QV|x&r>wl=ZaXzB-iIb{a~iXSkSOfpv!Kz zxINdoPbcmA&$z=`1khLZmBsK>O{e~IyPsQr`7U(4nB6%TU-D4Dc!*t0lnHY|et3-8gStY{3BV3De0F^Cg(h6*h6eVCiXbb?W@_*~p9;cKv5T?th<6BY;-} z3KPnI{COZ$eLy>)e8{so%e>u~?5FEjG|qjzI4&50Ud+T4z+*-nn9)m2?Tgn6HYNN`Mf43{$Uoaz5AG&z;l??0r2=`F9AKhY43X< zK#oS8FZSomcD}{~))Wcj!b%Ul0#@6lWt!GSewf*|IDOb`Yvi`3ADkGiR!a^*|w=j@yis6kV z&z%xx5uaaA3hB#R?gbceP5gah2QcX+87YhfO7z|e9lsPpm79?nb3nTPV*iVeUWeBX zC_t%iUw8JA7w2+B`>?N~1q)-&@V==}wubKFvBCQDIzE6GuBSN{M0}X(9!sH>@7TWz z8XdW&c+Xc>Se{a|l8_MI&|zP|Z=}0C4ly5`L$Qia=!>#Pem8C1&tA4PZ+vFN_ma+= z=@N=Rgh~%`#TVQ9mYyCdEh3jhAd)n4U38W2q*mQ z5UIr!R)$TWTQ~@E^)Rqw0q9C6+&RAuXkh+C$a<&4D_|mY_>>0&!(4UiBs-KCFT5s> z!Z^&R+$%(RM`=0VUo#^-B78^CftI!Rc?-IozT)p-u0`yALK_e@^)L!rsO693tc&GK z2E(E{UL<8u8-=|<#x+%2oyAnk5#TwI%}v^eDLfQJ{*t?1y#5s(^|^9No-Jyp9@$*BaiYQC0yJ(pmSV|*qc<4ok9H#w zHd6$$jfF(A%6ti8~6!0B;3QQR>%{ElP^9a{RU zeH`w#LD$fg!kV3?1SgFv*M9hGJ*EEQ&S9brbC;SFZP~8poO(ZpE_072S-hUV_xtgO zngRI)tgl8Z_gIx|IuApdnOyvtsC=!E+HC#7lrwDlC>ne^z4x7xj2k0+Qlq1din8_U 
z6Y0lXPDu5X7GY5#0R9qQh?7#}Bqc{QL{ho~V{kHqfmn;@#K!00`NI5z&d!)LDR_`q z2e=vbisbTI2*i|eAxm+L>h61oxTqHXGsjvCtYx7s|B=F5m*`)b% zcmz7coWYGPTu%)Pw&xk;-ngEz_H|C|P&JMXm%^)oFm>H(1NPK7bRRwC&`)OZ-<|N| zY@7jYBquM|SL$q`6Xnt1r=T6K%<>-Q8UZ_p7wIG%bd3dVj+TqY>hmw^w@6m{Hv;(HqP?%WNFXq`Y9qn)_Hc?FsT673>f*d{j7QyJS;oBzDc zq-`nVJnQ(6C8?56Io02WNiIHO-|P|C)d1AsiRYETq>#pMgU9SekvSO+ z=GriASt2TJ0L?2ZlDW~$;hrk-BhQe#(UhDPL4GHsT6L!BYG}mny&kKrfH8UUuLmn> z(_*g5PX~bEbnBA$2O(05S8Fp`a)cV>vI%1<&dCd6NY*2ovkW`}G8+SkIWjERs@|>h z0JBcKdGtR~@HI#e!3kV)vLEv&=1VuWK(o8D*DM_Hp|Di^fu-Q0y+H5v8?P>mw39M{ zDONK9>L>yB%T7Z1^#dv)9QMvwsgLJWJ)$&DZyz=Hq^R;4Yw^3S=1&KK*GLbGf}b2e zz0G!aJ8I#D=aEfprlgL$c}4QBdwls!BIjy7&&9r8aIC{A8ecB_uvh&8W=M`k?o_^Z z0LF5~e4Jk}C%xgz)D4v`Db$Y1UFh@FLCPsx`j!)jnvwm1{@r3RNV#Eb~PMT#hqig2ysAyQkF~Re2)7oA%jGlx{THz$-gzal=K#t7w_6z zONc`!P?YtqeL20=uE~GvT{EVsO^Qon)Tk{D?ZWW-Yz$?sQu+ye*A87b>@zbRj17b# z?kVT^F0P}Gq?CO20+$a^J9cMFUP;A7;s+2>%`W9!URj))i#sTN(sv^8V~UtlUOwq#Lr5{esl#1p^^$LjpA^8DX(FTaV|btDVIfN;48 zXJ4RSQKf!*Z)2oIN{okDM$UHjvI2`c?t9no(XYJ+A$NES6FF}e!UV6-xz+xxq`Qv5 zM-nTK%6%)=1P2L?TYZ-9J)oJtR1ST(=|V5R(%{vDVX0=`824mJuttk}=uHOD=)`3F z*4L6oO^fU^x%Dl2Ds8(I*`)Kz_hK<*1h4*VZ#J1vceuVV5Nkl_S+_DBt*ug|oFi=o z&0El;;|^sE%N{XQN4*(hwj2s&k;MLU(u&8;$n#s}KA_A^E0G(Z6S(3Db4s@;ah@$@ z=j}QHHcr$UiG>PsJ2s)$)apy0J>yb@da5to;^#|aJhZNKg?VDZFczt81*0o9(E<2? zyy35{1cQO63Doun4=Z&mtykg2@>17BOP2_NjC;eEc@h*QK| z^5(naX)w3X^MD`a8I&rNx(`{|e+sKG&Lbxgq%D9qYTl@k_{PFASqePaPRf;9kv>JeQM5Ub*aJ z5Jvy-QZ!}uoL^0>TtdXF_O*~iIxR?}XlcY?6`cc5_BpFa-!)^eblq<}&kZ&TGvT{5 zAR`p{RlXX~sT8KXR*m+1u@6VO*_$Qo_ElR&&XRpB#(mqqO>X4FYj!TGiwqOzeB0yU zkk;tA@qh~`@0Y_RtLFzO0rk8rat3&5Kj^|mcxJ^bM!xYf-IG0uNOG(i`4S%SgnIXt zU$KHejRfD%ArnfalhECla*GXvGG0qd_VrF3`)f6V{^IUNmv$o5QKalxk8KXx!kQbY zn-cO!2oFXs$d&vu_jg%IS&roxwOM!am+m~jCC-DyITju$0Lkiho_*ehm;^pa>@|>O z7fW8{pLc|h$f(B$tVk5W$3K6m8!v<}Sd*?VaF!3++HTk2f6RCdBeg6bYEUEpTFNCu zzd+Ek;XWD=?go_Nkw+5kyeigg0uGeyF&3B@AXK)y^UYGW=L}+oOJ|Ej z{=z`%<7d7;D~0!{^jp+_yc8M4r9HLweD&Q0oYVsqe3t->lO%A7GF*B?{aah&of>f>+U>iWELA8RqBYKN1>c1{FpMx-N53Y!}rZGjdG0J{G9=#U+iKy1Iy>?96 z>FP~}sMH%&%cl>c4VXhny`)$_b~9(;$J_%nuX&SOo5}-h@s{6A(K@McIF&fH0HpL! z=q*?#Wx3fa-!ZcHitGKorsmnN=_JbL)`Z=ZXV}HkMs}ofS4yJ#j%Ex-%ev0$44-}T_~{*JsCc`Z1J0$K zjgigewb>l{Ot9Md19##Wy}=W5WM_D4uOWpE{&eY zD25ThORwCL{EA@rQx_!F(UOI)2fFKN9#>O1h2IzreTnaOsLFV8or(Ynnwf%_-dKc_ zIE-b5DeB-+1%BG??yiC}gr)gaV8eP#?zT#DfoI}^#5{QKZqOansj$~j1CXRy-|rF$ zWxaVt4Z?-Zy!14L@b?ekA`0+YN-If8920Hvf(A{^4HYTNnCBO z>hEEHt)pwasw+HPfw1qS$Ou3%r+`~4U~myy7`=TyK%3e$Hq|!V*`kbmBfaM`cw>Hs6qtAuj{FUaBSmWo; z!&gc&dM9=t8Vs-@^;7Q0Uut!!TlWpeD59Cd$~uQGG;Y7WZ2T{r=ujBhdF;zHdT8*# zaK>7a`;~}v^^)(SKN(|LTZrPL79_ceVm`M`qzY-~9p6yayP@l9$6aCr6R@rjM(Th6 zP_qJd|J>+bj|)4Lhrra&*)$9wtUMTfoPQT}a~^67RJ7A?05qFlTwOK&o-yu4D8qKnMlH#-+r_SBkkS2Apq4OxF+ypr$2XdkGGj=)VZBlp1` zT}|%ZOGRi6aG*^SjSOPW!%h@1pE;d;{9^67>rGq0D?QB*0B&7U#R z1V=D2Ou*z$jLge*x6C)yCD*{-orFFUAE!QeA>6_3dT6@Fu8-k_^WrB#Ms9~XH!iq3 zG2w)6vT;DLpF-{le&O;A#R=!u$RrIqOD~_>9Rqr2!XHjkZ(=f%v%3{RuQ~XzyRYr?4LiL4>^l6GVsNm^WR7S5 zF!FM`z)L>=GR;=6Q$J}rXn)IT){xGKTJK@3 zkjS=qY~nOz#FQZYZ@gKj*-Gmg!cqC1v?W-+%=RWI&9b@3Zd9kkVgp(CuLd`5C5?@L zsP#)(g29}IptCK(C1S_1??LZ)0^^e>YaAmL4&~}N^M~gjo>ZG5ANyWyX`$ERy^7H$ zDk`Ing~2u9VAj+(xWC5i8Xu+XHk2+yXq?^{i3F0JRp&y=AfEUlpF$4xa=8ul8Wu^) zP2falSlVHkbTGf zIwYt@a=RLLAHIAQD%n!%XtWOc96rlL?J6#WC6-@`*%?8rbCZ!wCKr+25e^cur2>4)v2W5P>FIqRrBYno6rY#HAab9i#PHUGepi;~ zta0-GXeymHo!lrs(bz6CON&hBy&v@_N~XdK*>I2C2jZ1!n{oM|J58B=bizDL&wBV9 z0W5A|z$z_+gFfo7f?=fvAj|&74+jmQ`>+h&7v5yHmyD*UT2BNm&Q!0(e+*M;hr3MF z!qyS<-soaG##_^gO{(j^#a{9hM)8d&64UbJD55*8LW?;No(KB*G_ogF&*%1IP>B1! 
zZca)F!v45irW{^rpwlFI%396oE>`7mwzSIoU1KJgT=OSI+oS6GnTkHqa1jeu>KD`Q zz$$7ccmqt=xCkb?z6RWOMrc+Nr%!>e_`2c51BXqUWjN(c2W^KqrZCdA&(_={9>nZ)GybX$ zaKoczR@L((U8t5XGZadz*ivb*#r>hFG+ieywK9oPt-y!;Gi z`ulIXV{E>~V!Onec>r_c`V4S*LkS*KE(eq$Tc6S#7xCH?uo$8_!r#DrZB_8TP27h* z>?GW_)l``+dCC54>KHhHuq{1+Dim^{e?ZB%%nT4p13uMLtzm>9pcj@&KjHXfpw@MO zE18)%DFPC-5M3k}Q_cC}a@5ZC=2m&%;Qc`L1%bnkTKALre8VB!DD#4Fogu@L8m@cn z?yZtQl&460_g#q3-99bD5D8>72OlEMY~gLWyVEyXche9naH7%Yah#Z?xC); zt5xj&9L6y=b+orTUfd1|L`Y3G4b;Yz?1UGLAX(OL^7_Dj$#40!9I14r&G(ab=k4^3 zM@cwFZTBerSM?VPuoq1|7fTamEuF3VJJqR1YZx6Mvd5`3LdvG3H;$PRO~qhHfxnx6 zMsKgPmc~wDSQ*AeRHN6S*yFLRQqzAsa7X;Zp(K# zzF>|l+y46Vf|?%MBl8yY9)63CKu_uq@R<@(KUr~jk11bXc|!+T8MzlJU}jt<18|Bu z6Gy`El~emH{0rJ@V0H&W!@WDR~Nb;Ft+v^`BX*6Ci*y`fMoWD_9>J~#$cDHrwD2x4&j7d$M_GJY(8X=`6!w?*JKxlY0Z}BEUZ|4%4=M>|9=UV!m4s{&Lqz>1(52>soJf~1`#!o`}FAsMJf?B#6XaV}0LDwYZriQVzmV{Z( zL{c{nKN$Mb;rPh=-*{6fT+DY^up&gs{w0~mTl?;ku%2wrpyKoMd^uTEEwgcogsEw)M~ij(Syv&|9TaehohL^LIM_&*!RJn5RJj7f zzs|nc$x(2E&prm|Hkfsd329#Nm=C9&apop2YZ`Q`hL?#jY8@o_mFu`R_K(pW8C1<~ z&5uIvq0Z~*TF8X4T8g#m zL3_39X)7U-MD+%=rH^mj|Jsy%UxYWLkDUH?j>m*fK@mdX;K28Kt$Z=%O{2Vt{C{q z2_&m}a3V!3E(ALFuPAif-c0c;b3q(@K;j`SMw}A!~>R|9-e}8hc?xT7>3TAiyvwv8H!$A`^=+Zf3nefP1run4B5nVLt zI<%LFb)3W?3q9T@Zfq4>NRk;W+H_<|HVmh1MEWyH)}MqC;7A2=VV91t&NaCXz3cRO zRwJ6)8PR_z=Fdr%y)`J!pOH!(V|(^1-|lv<_?~EfG>iDsP5I)%<^T;-vIv@#xG6^l zU>#Z11nuyibdCGnel(iDGT$w*-*ezj^HkE4*|2O7DiS9=YGF31N5_@a(TZK`C9d z0Hf=HbjPnxCiRA!mTcSo0WIjMd++6g!goXjnYp2hVGSTa?2D2OM1_s)#bj%92TB`% z0rZL(B30=@d4F`XBByYX*;_nL08A*70D?L6k1_#1l}Z28_@fv|dT{0QHPi+!1<_a- z9)I=7rI^$rt?ip8C0yeQgSz}i@f}E>622k8D}tx`C8exA=;|BVw(rruNczhVT`w6@ zo#nLCxJc=+V&8%ZKus@W7F2%+IKmuP9l%bQRUXh)XIA^)ru=8~pubH41Cy@`ZM;Qj zbeW4*rm6H%Y)4?>Ewyf1{IWaD`W^z%L!_p7Z{6$1nrCI?}01;7UGvX7B^x19=M zyf7ko8;Ee$f*+u5`1jFg2Op4LN9SKdO*)DFrfa*UX1CwuV%UIAg-T6Z-2}&dy%1k) zYa+I*2qDW+@)cnl@~JF9{A5f*6rR_KYFzp$d1q!cS7zH8 zI9Fzorp37tNLA%KiEb%eR(y=ekB?@pxwCPrf{09_KO6RS ze&hSOxbRx8WNw;m!={LlPBpJl`~W){I}CRfK}v?n1XMzLIEKge>seVU*dE;N`t={e^;RD8XNG zm*#8X=5j0`8(jTdGf07$659|9`5lw?fx{;`{(U_t1SdMv9F!e(>uV=z4 z4hG2z@L50wMCNKqMz`9~yGN7sC+Sx(2=KicHbTcuAGP`30w5A-d_I*sUy~umXS^99 z$dNWAf~0s1Aj0qov?TX_Tn*m^{EW5|>>qau<115dZG><&gVBGJO@qEVKA?JCP zSVVhktirjm5LZ+SAUIzr>qr+?ArUr}#(GTu&V4}0PxKnE)V+r(;b$47ZG5!YYJ&e% zt3^FY3l`Ur$?QpYJZ$?2#yD+a2Q??Oj_yR%%_+Hz>8R@ zwt#CJiG^aQ2`DNN^r}DgL`%#?IkyfI0KmUg8}&WxARpnvM`}!(HZVm_xph*b1`ntC z;kJAgRc{D)Z|D=!)G?EmJjr-ch~({0+Fpl9mg4{z>g*iV(!0e{KS@cxnq|wL^(f1z z-h6RgO7nhWDXOVmyWV~HSb`GxC|UsGIeC$hOxGUyVe)2cf92^P_cj6{##$4wKJu*_ zAsSzd^9|3EBCAQw#GgD*zU*>F-x;{x5MGS@wfdDF$&FWUF4#kQ=14E2PEC2vcv!uPV} z0FUqJlZ|1OE#rDSswoOIE|gRjy~l0CSyC6sgz5$E#pjmqg;efMoH;?LP3eivpB>t- zmZhe%ieyM`38qc^A}l6LwvPKM*6O+a4#RrxAJJv$=G_>eUB{&!ghN`AL7(yb;6H(K zG~Ja#kqPBZ)QL2k0z%b{Nu0uHZ4B2wgi7fIGY zF3Pldkqn(|{g|FpOB>Fk!tqmw=R-?(1|zmDd;}Q1NG9dyx<4)qT?5CYV*qcef6N0o z9}eW1I}FOa>trSR<$(YW>P3$)q8=+KP_VUeHsTs?=)n?&*`A0IKsrUt`pZ-y6q#C4 zBDLN0E7*NoZRqK8+4~^v- z<-G&!d4OH7E@)C8oqUN28sX4vDfA9d)rfj51^S8l0SbrE6dZ$;>L4}nd@`Xr=>MVW zt)rrP1*Aqm0a1Yghwg5akdzo;00-%=yT8AC z&pqd?wf~sKn#DY8@BKWVe!XvTkaMpAD3#`-c zibcHX#Pqv^^HRn(gdhG2KDAigL9|>)|Hto(kzYhzV&*Q84hk~nxvI68yUHtIw9Oa( zH*N8=@9m=<7qNPI52#rB`Jy{BvQe@a`tzQJ z*ujqEs7JRsnkTHzV1X@%Kmm`EhOk!Hreo69T2U4TY^2*H$6E=Sr&)UFg?gU$%XFby zQHzr4VYU_jP8@6y-Sq;|JxDg9B5!mG9YQI>w9WzRt>b~)$I1tFTX>v>_EVcPV%nWO zw>jOzBO61(H=hLB4?pbGh)uakIv1rh#|S=-X3c9(M)Ti_y1l$9T2=mBxfd6e1C^y4cmxuNQQPaQ7n*Wt>InHAXMiA0Av#S(xTSFCPuIf6r{Lkc(-izd%NE%$S7C2hWM`_V10q z1##3L?Q|ue{V>G#O`X3deoVBBW#%XLSA4URFLKUeS8Kf80 zJb%sSRN8Wg9~PcT;-)#zKO7m04Z!)SFIu%Mh4CCC7T7_#Xj%U_vD0XA3~(+zZl(E7 
zg3VLs^kR(mq2hk^>r!3#&nLwm1O^0Td-N0Pxdc40(%xcSTM$^uq}6mx!f@xa(67_< z73_i1@wR^tJ2Uv-QyGS`2 zsA2Qixb*HmGggfIqkBC=OKVG45s2wNT+}=jl3{Ln3C$JL$fE6HiusdQA7^ScRFSphh z`3?frfrt6`wTaspQA%eQUzRf@&So$=Mqc4b0)h5NJGjw)2Ye}x}|k zm-b>*d9Uv`-KL}4KBES(bQg_&H}QtVbK{hZw{EjQltw`>z@0$DPfzSP6119-hRUwZ zb|iq9c!d1?3k)Ns z!HX`YAaVaKInP4!qvL9c3pNF{2eo7B-{EnNz7?MnUk02e73Z#vSTyJ9jqTBJl{bIk zOMB6K%y&f+xUBWM`5rGvLwq(oEdi<(hiXiW+o<4XavZC+_Gg%fST~96P{9~Q8+8ai zHiqMXVTSu(=}@dJe5!&sO=35y9jTu{-*lM=B3v8#>nLG8A1*!WYLPA0dFHdEy2xJ?0%Vh7g7XI?2h9op&%8%4D@} z>q+qTC+9rQtIKccCy9+{BAb~5g^Mtnq7!Qz`N));BLce0t{BKyxiylNbbtGzDou@2y4UAp($i_i zpBFrAB9fa{+iBEx5Hfx21hvJ2dFErkp(NxMS2ta;SkK??u&gv150v89PB0Ql<78yY zTw`|ydch@TcXL*E;Gv}W>c26xJ?gHE!cSu&GZNBCR_p0cj{$(M&PnB%_QLxZ?h|gS zG>yfR!hIu^->s*KVLvcutOSJ*Tfn^nJ zKrIih`)}cc@Zw7=vkG;^d~+o=E9ZqUc=v-c7R<~BLJW!|oHM=A-JVjB0!x|Cbj$5G zl}1^>oXp(lCzKf4FtrD>U8x;sF| znTkK^?#m%!bl1EsO#GJ<&%ue&v@zex@ewMWSFziA!=R8JH;0=$y&6DO0M|a1n^53Z z^R|4_y(fF>Vhxy&D00$&U*#QQ%v1oYh@b(Y>#tIH^D5`i&Mw^i-th(W-jrj97;RnX zBA)`<061Vf3_**el2#o6-YuHMr}%t-DYww5`oLyFhsvV5QTSOBa-b?mEGv}@l}tGt zOJ)D1tcKBVqEufw$KI&(+quNoLbq5}jn&mfI6o;P5;od&M&wzArMZrlTN<-Wp9sO( zndoJBW?O&(56EPP3ZmZClh9=i_pWb~jRsLg_=TMn^^!Df&y1X}#f(~KB(MV-37rH~ z*yK^cEX8JVX)p-5=@x2t>BW$oith;{w4dd%Pe}$hrm=_CxMB$%mE$A zbeO&wo$ZJj26qMrTuEUV7Vcq13pl#Ui1Nm; zx2Vt_;l!vO;0)THx@gNU#!A6o;H<~+&<+ZUn;d1M>p5twW9qG@*R09@Sz?+5&)=4`H6=#U zsl%)YP$P&dGjvz#wo4?Pt>*lHu>jy4Fb$5SH{KY9o5eq#uL84VwC9P;miT@O6$z58 z$+;)PRG%ca)%;iz+OnQHv9Lzm&TUPrP{GER9WXe%Ea4c8aO%wE+63oiDD|;(?7+i{ zx5DSd9SPcOnf|<;xN!Df5~aQYT>CBNrIoXS`wZj!P)r$nuN~3$jI`InXYxKHZ^#OI z6+#SQ9Q0!)+e#;YKEMBX@-%Aj$4&=BxHx7T(TbB*YC562n{T%?merN&UY+hXX5?n; z(bc8Gw$y&}@~Bh;*~iE(K^YDKrEt(!mGd;k49V?he)du8ai+1Vr&&KG@23QgxAXS| z7I8T9E-o=6a00DwjqdZC6QhoI$AaO*v7g+J6hNIPzAKP(I=e6z1>|(L`9)YU;?7mr z-}T!~?8n6l#SpbbLDwm5!cveqf>B8awUaG5j<=`%7PcBC<-JBOi~b<$rA#R=u`1`q z8KDuOTA&Z^F>hE{c;;3ep{HXtrC7`)UJui7E_h0F?e6hEP7)o^NB!4=22vM66ExCO zef4JiuhqO?9iCzrW3R}e<%slu6>;eZ!EYX{c+&4M3DHN(Y_$)(0r9o*iLpt~_~;W0!bbC7|CjN7;~o99@^{h?F30 zRj7VJO6KZF8{qSpUHT}3^j9W_960&N~A(JBs|v;JNS)=?3}s6AdGbF6@(L% zB(aGTj0t|=G2S#`P2#~c6=7cw$<$5&W9DOjIHf!I;ebBMhNF*i*EC2ysnn-R3a6j6<#zh;aBR@wjPz4mQGutu?SXdsr8wIsw6g{d76TANMOdgV!!)+tA8eLtK z()F8S&gW>{q^=}b{?SD4|0Fb^?uhMVR-u}9;kSFK+oK|JRNaq%cyRdiwPW#`$e}V* zr+ub*tGX zQx3&X%{L0%3J6_Bj!DyW$7VycQRB3xA7-ldNIooZ!V@%TlnP(J*2DdWX#-Bw5i~j< z!c$~nIqG~#UOzG_c0cERPrJYe^mhB$OXJOZpO5MN(j2fiW}n!b7MGj2_`!;JdX_9C ze!};*VHVXa_sI(&S&eAsM%`oLN!u|BkbLSf{9w9_C7MCsWoVh$vzx=$MV{XC96Jji zCcT8Io!e;xQpdpOc(IR%(s+hh_RU9nmNuRMo6Z;7af1ViI*FdxZ^N}@KxjTDg?c;{ zO^tdQIbwG9h9eeJV@{eLIFR#GW5O6cpAc$EU%q*9hXop`?P5_99?X^y5)Hmh+fOz3T~7>GZpD)DCk*q!x~02P0}U+*eW*l#9M8v zv-DBvzy|Mt90@Sfa+OrqlqaaVbNyGGy)5a-QqAJ>Ufno7=-NrMoQ{VGw@!(GH}DCL zK~Kd~hO#%LSRq6RixBd!>JF5*!hcB~u+tGSk3_508b)=Iq@ z;~nt)9dN^o8sl%#Y27<1`}0+QKb{CKp&OR_TEoVv9KVzUB5E#mQFIgg2+lKXTcE4T zprpn`4?SI74N=bpy_z6*DHwVcQ(sJY>4=O0jXv`6ze_uqBC6#;szM}BeTVW$Be&-- zGUWz0ExK3~c^_FXDH1 zcs<4tBhFfP_4@=QfzV+o6z+IoP(F<=_fTLh*#C(^cJacm_2q8q zAyXc0zBm)tie10k)&GvsimS`zOZzQh?K?}PrFiCXX9t_e=aXUJGDpK zQaB>xAK^K@xsFRVP=RjaI~B^}7Q3UyV2b$_97Z>%po2d}+blr0+sBrH$q0ebKdT|> zQMYSlFTRRZFo-CSx1WxwzngM{Q~3w}v%GX;Jkux?)$dMUy94j%gcITg$>R03(A1(Q z*6nKT9N*UJMqc*b!($^bC1pH65cyJDxt?Pf%1@A++1YF%F9iy-0EYJ) zNI=ZuPUQnPSzot*OB9hGjm(zWRx6!tq<;;Xf%Tic!P4T-?}&!h%BXM>d#L~L zN5~rN&+77n%o&`5mSzci+_GW?QY!3&vwI#YIr{c(@C73j( zs!dt5{n9`G;<12J1xoO`;1nQ z*IKE8epMf|W8Vs#W>6!T=DmegwW%8Ndm{$%wzTWs(Qw4%DH%fkUXQ1jmPIHkxnB}4 zs$_pPQV^8ER?2G?)Wn1WebC6_f6`hp!nJ3Y_J5`o{u3Ktwyusm71#s`+0GFhzBAQx z`fR%LgttziV=>0qn2hv)Sp_aj&;mPw9pV03zwDS<|w$pZxkYV~_RFq_|#5kX> 
z(}J3Q$-!AY!~-ecMfbod)Ta_&;`XlPKMh7CP(0t(Z{oDpoFnTcNv@b@On!`39>YT# zwbolP5KTSK5GEO&aP{83Yw?Q_%iQW~Mhg0+e0YSH>_2kEaRJx8Z6{GzWWPyJ@GE{x zGf@yXUjduc|6WaxoP zHf(0LaqWQ}OX!%DfuFIXLzaCAH>xGw6K#b(NAqHt1tZjofhHJ4{QbK^=23PX?4BR( z@m=wu_fLM;bHl=in=@R}KEgitK0+LFYI16kLy($@35lVhfg1S!NFggPFVDTy5gcX6 z5weEE+gMsT#P$a7NXeiL$3#sHY(P$36s_oOc}G^=A`XF>^&Y+DDNj7x=ECcQX) ziRRACjbsDoYCVwL9!H|+x`Gx9`Y!dEPM)Vaq{@3}KM=TV)_byr&f_7OO65G6+GM9k z*TVZO!Sv*#b;OgdP=D}udVwm29{xK8PK3iz8mlHt>8kI=h-c%<_Fb{;&k6k8zAr9+ z@LBqHJ0fpd@-%hKprrBY*EGzn^C`SatH{=X*!Pq8>6;unn3K3L;-(DsW@$D9jcXAk zb)Zu1*8DIPEty;XlCj3Mfd8hVI;}`;NvnP(C8oHe(9K#8tG_s!15Ao3GDsb4;v>fH zuj9PB9NOPc45e0k-8Q-|F5Qw%$er`Ns5bYYxv4j*nRooMTPDtje~hUv*-sh~=L9N- z;}5nW!59pS1$RcmXqliYN?H$h3F%J~#QTFXGSx6-#=Cff)5xb`r zGt5kBaxIeGQ?6!*QdhW+P$0Z0hx6~0?TdC651Eu2D|@F8TMZDXAvvG!RM_@XxUctMI# zVInZeI!UbX;L)!3rx_NyCGwpS`1kdQk*lJPSX!}tOLG5b*Yuv(fOhHl626#-1i2lZ z{*377V?7qtnays$5U1%$az%2?x*}PQujDNt5zQz!TUAT^(q#vwl{HFXviP9wcHaQ7 zPi7h+(J66X7=_NR|5$Xb+&q=XHr<+3F}YGD=|p|fEq;0Y^2;T}x9}!7DO$t&Q>R1l zwwQ+eJc%zI!&Q;Oq??MbkM&w)N~4H~n!RHiyYqCg%sshU6~~=|gxH*Ko3U`QyLEXi zZRzRMN7WT=y2-GpvVDi4Lb77(Q<&Fsr1GU;4}+*%A2(E{Cx_KQB6?E}96&(r3j{DN zqD;b6{(>BHJ*iH49RoO2(B2fC2IKF2AxcXbxsyIyw>@rLR^74sdavm#8n8~pG1XK%|m6L07*mG@NcCx ze2IzR4Qj{HRK5 zfA|ekCXGX8)ByNxGod5z8&;&ck)n&Db*0ksW{+;un)-viSA5qisIX{994nZoOl6ip0yk9DSB;^?95>3%Cp~Rn_cANN#jQN)Dup z5A!M__IgzNb_DPT)4|6{cFQq*LG>a7nzn~q!aodH3RUN6nd^2X*_s?FCH)iamS5li zicBc*9g+7rC-d$(Ez%$1kGrqU(nW)Q^V&V}7~KlrGh=4>@eMym9AkiJKw#j-0P8<- zd+Uvm?M4*}l=KAkI;K1-U1gNlIr@i?cRtPwr34IGoOX5|wjbXNO;&%gcg0Y0kmruOGvT2LaT<3JOiaaRby_$KWh0Dsp2T8s>GqdS=_qp2mQ_bhO9F)x0z&v z1(O^lvG~533?&_<(f0^NGP2cp_~@qU`@u{cX{cw)y_j@a zTUg`XnDJP{_IpwTeiPUuD>0mno5MuBQ=N`WSgiOvKKzuN=A``8PbJj%ASoc9AX927 zf#}+{rAeC}Edq5Cmg5!hvioXvRZM&H2<5*BS$(t8Jc7D7wb&1VA{|I1z+*>w{c(8^ zx}^1xjkg9khn#krSBq4~ZXMtlfwjNZ1$5sOoF2C%y?pGA5)Tf8EzsBHx~Jl zl>&LXq!Y}UkQk1bsHO-?6{aP2(7HT%Us^zoxww9Ee3}fiAo`M!q=G|}>4PJI|6LKfHi>h_?B6(L^*caKN5_9@QZf!FU8tjmo zsa&6yx-UY!Hf$g~{5@ciz6doN=u4V|Hjb~Eop6evfzaP(efL$gBG+M|7tdv^@!3+b z94P3t$wlZGc1deDL*fJm&BMaWe}Bu9{FG~jo;)1kZZSduJmVWLnV_sat0bF#-m;84 zz8d@)HB82IYkP;UY(f^bFr8UvxdG{cC&EF#|E9%WaOc!9ywirI2^rd@C=qjX#q?rB zmD9Q|bewz38{e9m&Q`f#H>G6?R2JGF6+6V%Q0arxX}#LjU|hnC5X+|j?ihC$r+nX2 zH67kfFJ9qoR(y7}Z}H4#+p924NVx_}}OGg5X6@% z<=y?K;M^F6wAuki(UN#~>OLRg5I-GLmysYHBOYP+VOFp1l@-Ape(ougZ6A!w<#pWg z!4E&}p+@1&(TlvQ<;U*c?5vovpx}YE^4`mmgSv6VrrV$H>r5F|2c3Xe1p}dp@GTn} zA!s|lI(Evl7>u_GF5d~B5L$3ATl;Q&k;c@$c1F14R5e>qscmk>3Wg(@orFvi>;4{>5G{+Wjotmf4U~ar*);ndvx{) z)wC9&MK8NdexAnNd@Chjng!y_&WKS}9~y$hUnIp3BWXxkOE!?s-1 zi(~&!c#P(_JdB*5_%(!mY9skmLgdRQm0yR_LFR9oxeno@!Y7HmYq4257BLjPX&Cq- zE#|hN{eMD`Y31l^TWqaj5RYq5;8>nmFAhD;yXEalD-*aBldpU7Leg(BbjX`3z{a{^ z>dd%J7Vq1VEpuhvuN7FIIc7m)`qjsPiR{_-5qj+${#;O_kzK$~^`OIUyqkVWFFEI8 zm0L@&6`A*F3t9E@hn#^wO_%4uRI>Ca0ITjtMJHI`{yi*Z+H|nRm){aH#T*NzTP1hR zC)|2$jDOU9rGBkcg{VY-NyF}Gt^x;Y(7v4hoOTC{+Wi2?nA5-2xA^pzzU#qwF@@qp zeP!OM!CJ_=%3{@t#H1}P#OWvBH)-|lj|-|Di9VM0fqCIg;_7t1dxFt6=-s+6nQolK zJBpc=_j375X^swvEgrAhC!EY_pp92BzO5`PbT{`GPYm4|pi0^-3gO|diJ)X|3~^&e zv#X^s${5iK9VeLgMT|Ws0cP4seSIcmP$6jT?X9#jJhi}sKPv|y{*8aH0u19XS=%s@ z39o#FHs^F{zFQCRh}jj&_8DLv(?bsnZDP>5`qB$v^MzifFv+YO00TwMd@{_m_*Gwgb=oh@jS$Z=l5m=+>{D){`4=3E15rQRr`M2+GZ~o4H|3Z7MYz zisSO5swh#1^$@J3bgl#~HESzGN|`^X_q|%}XorZGJxhaElteAk{zzo>OCr?~G^u|5 zg^yW({Z&**&G&u7pF#d&`@l@4gjYg;>!WlT@J`Tb0XdkR&{Vl_-q(?koA8*gqW`T72Kg5!ku)QyHGcY4{r!onQ z5Px-Iw@XqV8DvbbIudC+RO!qE2kx187a%;B_9l|R^!4ssg56EN$lEr}l0Q!A@_u2D zKGROB>$Q0u38M6<+eiqWc86jenLWq;!hx|35$U{z+odi6#P;@dw`x36<}l#31C9|} ziM|y8A~`C5Rqt80s-}d^UkpNV&Ba>BaJ~y1AH&g1kf~}q=Lb#z-|Z@A@=7{Pot#j= zr#<{4j@sK=%iKu2Ar@`KO4# 
z&sKZ<&jGo%pxDfQ@W!Kc*+a4()wYbpj{v3fLryy4?E*k3e+i&_K8q;P$sK+%1)?XF z7_pLmNx~fft2vy&F(K3*elZ~<`RRA2?^l=6_ri_|iTsA#s4vX&VIdyy)7xw7BkZQN zfp3Ya+NtJkImaAb`}5WjgVlvH*pUT*rg?LZ)&&h5f~k=^1Yi_26I8`Dw*4G?!A1_(Vf(4ME&B$u4FwiX&8T>HdYPa z2uaYVT=NwL|BcviwnmPD*-RXo?8%7yTK)L(h~sI=P4)6|Zg!4KM^yxZlQdFDnAGLO zs%bW>Bc+kC7@W-Ws#0TA77zF=fl44Hl<#`2rP><&sO=2qzY)~>P}at`R*9wp); z(!jG#(lPGpcKCN}R&q5j;GreFPofbJ$^5Mo#h|)$l}3mFO1(-`hF@{PD|E^^=IJUVBj5X& z$b5WgimzoILI~JT?a~q{&&3%%V0hr>uieAmAR{TVFD?T@Go~LPxT$S?_2mNyb#Zlq z3eUq-2;Q0MM(S?w^^Rp*kdi-o6f0Ts)Xem^=?!h{2fY}l5L>DbX@}7GXB$nWmRMWC*YgoS_${+Zm2vF5RQ{i3YvK(a#;k3ah`W zM7eeMtH}OlF99cTj*ObRvTd0L+Ae09hf*w|9))JwD)gA}r5NLn3*x@C+l%|n2nhmz zy@W<-+W`tYe0~1V>;T<8!d*`j^rUqv+Z@M+pcCFLK6y7gW3lLV_nV|2I`Mu&uC?i4 z{9SO2;A)V)U;hSA$uSnz@vnb`G=jV-7vL>&Z>acdM@JRya38^~ zz~u!>j2O(2uAhG|eyqDGGI@WzqOZPL#v%sYz=%4VHNOFr*my;wPNtq-2Y*bC(z90B z#sLU+W$8cTi`O>rX@#U|q`hS32-mGEnMOn~x7d^(xZWNEf{r5LK&RKYz`t{I75XJ@ zJ!?K*-I)o5Awn-24zCPDJ$@npZ*GgH>xyKMQRT|V%jvtBrbYF!6^!DT%Ro5R?2p-% zqgjU$N7u$X0<}CwT5$q5wjRg#`-4!Xzs$)Y-{0b}KwP7oj%`en`{j#SK1P|kXlzH3 z6OezjV9nL)p#dio5|A45BX}0b`y$%Ps%ZZ#CcWicjOkdErl2@FU1t;b@ulF&~0f76i0CDR+$znOm6 z(O9iTq+&?>YfJ zA90BeD>}2p=1&2ks2KI>RGW7Oim0bQ>z*LrK!~j4CnOQLtx)5ma0W3O`zK>j<@Et@Gx-yV5B?7HVv}swMP&KWz71 z-8%Hbbm%+@13z!xjMB(G`dYdNkJ3|bC|2&QUp0ML!nR(g&~lA`OFyO`&%B7d=#Ye7 zA)-m--;q{=6yK?DJOqB|B3LC1c?|X>d%9*c7jOT@$LPKI$VT^sufhBMkr$m)EF*Av z9gEG+6{p8EmxP^QbrV^uU1_d8ftb>tgC zvTTA_``0s7!7;S=!n~;2mfY$`n`3H@qkqG5(3$iTM%w;5HonYsu{;Paqd?(-WhKA} zPE-i)ElBFnDkAsH?+n~u{9&#CdB7|F^#r*!Xx$;ArsMjH-4sfNc@PPBKYtq`2SBMt zV3NEx*2SnjKm1thl@-D%d_wNI%b|Q59VwjaxLc~KaLjYmFEjBN@>HMLg{y8od?3HP z@8A^cPxju}`7YAf)zSfAvvG~MK)a%{C_;D2Qk|M9kaSmB=ef=DF4-kwPlS&cn1ezl z{33iX;-YjyNQ#XOG#RFIWwvtw??)>R6l~zzUFeVR|L1xAFX#OM9t zU-&i#Ud5lPr*U;T9sx|HY>{8|<&SiA6!M%;0&E@MiOq|JI>3j0yR~1MVt$gg91V7C zeWm+lGJ57epo0~k`=KJ?r3#JhYz5+-8{eQ5p@RwoYXFBPn{1uEmi|>p@Fx^$2|3{i zJNoUuz)6w%>rfOZ$u;$30<>UEajn?%C*$B79Ddup1TR{1GxcZ4TglY=pFUa2Z*j(R?t zrFi+w2axQ#f%qGk@+>CF&_q51+%VQ?%v4gagqtnZA1LI!FuVy8M_AZQu_P`m8YGsu z`R*W;&3xfnzF5f+`vq<|m^DWYCQEJ)Pw0B_r;oKCf}ve?O*3d` z+>K+$?jpxM+!BU*-}e_Jt#d|}6>ihXjE2Peqjm{*mw+zeu?uuIc%AKsgW}=F22-fY zhTPmU@NJ$P2QNo%W8gfZbahvxP+RNgLRsnG@5kyeFm}`H&ALJ66S0^&$RRF%gEi~e zJ82ejT=%`1Dg>%H{JMzk7ue5GVD(^&#=Z9+{p~wo3;ur>Oq{^kD#!dY)8g-F{3&L^ zozSPywpRqNV=B8#LCPXt9Z4auAVFKwH@+VLdoYu%&iR$rsb>(Y*RF^RCj_; z+jTzH=LI?_;yc9mDPO)}s9^;Zx|CsC^6`gjMGp(b7e23L8;3bej#hX|A~puiYiW}&qJP#}a%B6jJPOv*kzJN3;uAh3CN&r~uD@nNEf%!p#9a1;-= zC=YirHj`%l)kwT#auy!ORJ};EZ-@Pb5JQ+q0f6QTkBPH55xry_pZp~<(KnlbOgr=h z{+upduk(OJk!?bKsY?ZOikU??VT>{?>x5%ao*0 z!BL#-lm>klh>|B)44xqAjgyYU*fu-yDV&VsM{}o%|fcK5R4}F>g`ujQh z;bt`mphz_3{waC5b^H6f#!io|g!GzK<9wK(_~?ar~{`2mEjGQc2-K%AeQ4 zPsn258^{3;U(H=U_a};D&gr=ir=8e~A7(y$(Qd{U?Oyvx41Srr1bOk})mEHE^P|}T z!1MdRE(0urL)M{KEWEH(t+uG-vclbN8?5SY{K-my&~!22zt`K8_xqP1vs!J*-2Fq3 zk)i(+oLs7mSu!W-5iMHVHgb`EdQ8U~i+KDbOXn*i=JI?0hr5fVJTa$Re&f}S$bCS0 z%x?FSfsC2E$p7`zz)B3Ru`Lwa0p1n!c`V*KQ^1t6On=F*nsdy>W;09Nd0<3VmNgIDxFA2`Od>zzCT!M%Z^_M#DBX`XTaa80*Av z`L)b_z)rhNSLk0jbvX3Co%?1bMVe+H|H}66n_#ZeXp9aAWjJ?}o_RFq7iCYbuGv38 z^YKsMPKc$eYKwXl`_Sb!!A}7i@{K-`U!U*#^F-^0F3Q}BTnaTgFAyh79H+~n+#vae z&vN|R;R!&J(bF#K>eIsnpC74KX(2o!E6Hq(*a!9K7EG>_uez!~Xr7G8KL zPo_u}bn~P22+tP3+~)XisJ+zI@yPk+dB$(z$Vy4i5sm~Jc3YkkMMSkka%}|sT2MKP_>RLWx)fSIb&<7)Z|AXAlv54e z4Kk1v@krOscgdD2@mF>~F_>(E$1!aZ2adVmEtM~gp_+1RB|Dw`g`+%F+3h+4iNINC zm`$FDXd`S))cc6Z8G+)&L>x3qx@13qKnoH!sfjP@WyaBBnUg&Ox1GC0L^LBQh3QzAE+rV2c z?e~|yapPl(*_Vcj+Xte3t|M=?t$SRVdwRobWnL=iJzVZp*W9W{t}j;_mh~YVyv9Go z1@)QDk@h#c=Z1p2C-72%r@V+<70 zOSp#YkPmncYy*-BgL*PsRIjIC%B6;>d|(rz1Iq!{0ZzmMka?pVJn8E>Rm~^J(T2Ie 
zDt^8M<3@9lR}x_7`K;fU2>oYc=IY(_OFYE#d`G)(uTCN{`^wXJ@{Yr|XI+merni6; z=81Cl@D#mUp+o1x-9egFyg23HEWpX~4*nBbpUj{G^ml*!DOyfo@0uU()9kapxHV>tizB^O{mokqWDw@6&1>eZ_-TZ#-Q(~`5&vb}dC+)_J~4`Sfull= zV_!=jCRLY{l6!-Ijnsa&JIy2ME)%%Jbe=ZQ}!{!3q&2;KJ z%clQWiBanr@a7>E+iPRj1<|q^zZ+7N7}UmVz59HuEvNtJaswiAjrle~bWnbflW=9M zZzE`(EP_nN20uT{03}KVF69GFc!sIq2Cm z9hJirT<|zD9!Vxw$uYf0h! zS`DI>#jw(W^g$?#gJPlEPW6nK#82p{{DAnL7BhMt$Z0v=JPj<5Y9PM(Xgx3^J(r`~ z>N8f$H27$cgVq>H!6?o1Q<2V%mtoHLE?LZ3>(tp5@o~jo`M$~NZBVc60NN*_J07pf zHO2VjI3VmP!*Wwedj~xUv$p%hocf!wl@oY5kS4ARw|ZdtiRn|-k!`T0Q1wLS_OmGcL3<_I^7b6JG%NShM;#S#f5aBH7v1tC9#HmWJ%>H{cPv zEEY*7h@s@JHJ8PDn@diZ21SvaDEB?h&T*XvmJ@|*f&H<(t~QtYaY?Ih9@k_jx{=RJ zyZU3}wek}r7AG-u*mBpDK~oPKQ2M>I$>{s($)`E}7Lh;#p${sfENy@PoN0G*nRzmT z%^ym+{bC-2*5VlU;lGN?;8g(g$AdH`xw)kxU*>_%o0WrGx{0qGF8+>h z#jsl)xp`8Q>-I)(BI(5yX7KXa-`|snKVg4oFS{>V{(e7^k#Jfh*I-;fiGt>jb!gdb zhsXf}|)Lx1IILzkTi=S$O)C9Rz?!R^h zbvZjel~x-8pX*=zaTm`?QH!6+)L_m};Wn*uH!4Y?p|5hEvJn!oQ-f5_*gahTHhEH~ zqn=%5^5ja(1{kEQ+Tp){byfDu0M@Z{i9NWx*s z)ZAw4YDjw_ldd?0#xdX0xE4xN?IjT!YMAKW-TK0|BPsr!36;~}Xy5xG4mN7tW#ga2 zuYTNS((QUOCSwTvr6ZWCa#yZ>VY^2zl+uQ_$6Hc|tjr!C^JsQtrtof=#?e+|xM z!V^iDKBUm--1q!*XUj;TX|;VAV^UK|mVkNJ5=REk*E&@HkXcgSQ09MueR)4j_}4wF zBqbg?=6Ltuew33E&xA^2`pKz4t$Xft(z6Q)?jFiI5=QcW&@3-aSORX2y2~DL=W>@A zgkVOs5E`;7%>Zj55iq;*!lZW2s4pb}xXK;l@1VvuZsuif3XO6UW407Us8RII(kig$ zmyGl{OFxw+`0>90Rg|im@W1GDrTP1yQL4mGVGCv%^rEs}y$Q5t0uC7`PO8tqIi=T7 zsiO}0Jv*{3ogN=M1BAJMJyq}j6UxmDY-l=%m#UYrpysKBQl0+Np_6E?ilr!0iu5^; zzq#UlRih6xwyv z>(EA-Lz`cB_0duih8hD3XehQ`^dPjH%7*M1NIHM3(p-D4PTTHy!@T5Q_>49XY}Y`h zr%nnz6_cKTy0aHv?|Vo84XxrqOb63L#3`N38oKnj=G zlS#&JayRDi5{V@V#7~iw@l*UstB0tG?#H{`eeE)O?O>j zK_&x-O;Nf`2Y`7u;cP3n9}hi{>b%lTIg*fYz`IL!lB3vd^Y%wSD`w2Myo=r&sv{(X z1};VpWw7yG>$&zIc2vqy>(Z$CG~ul4ypnv{xx#dr4(W+dhiJ}61IKG%78@#EbqMq1 zC|yqzJ;ASh^_%ld7V^oWoPalQWCBS7A`?wY}1BuQEQLTVO z^tAtqN5XIra36k+_PTnYq@i*=bwVio3F^WJEXr&Cz)2m(vOjn zDUKGN7MeM~=3~1zhrLrOdJ!@16qD!a?j#}ZQcsTU%xJh*s*Ljk-8|YA+ww-hI#acm zo8j@JkIuORX*EH4aqT8SAl;+CuobG|9Beq?2dgl6>v?@dG$ZHg23y&3#?Wt+(cW(_( z`oG0Drt6QL9=MX~AuC%_=jNDb^KmMT(}Tv6tF2jx)De|GTx>t}$%`jiv7g3Kv`SjZ zkh;-=BR@JWidOwGlhm;7`RAIITdFCHanx#6e9!$am)T5A8;g9oBHSB`C$WYO5;Z?!24Z&#}F1D8a8O>O@+JAC4Ypuf}?hC0xE4R^GU^+U&~xT&Yr&7t_SKAYc6>$b0r`>VUQ3 zLmJxU89KG^Bczfqo#P0A@%$5I}oJIA|ZGpk;tn$bF57W*_&a=PV}#JL-O zk}7NOrpvn%&hk*=r!Zm~E@YoPm$-K7g8uFaOYTC8^thnZezlC~@8bN0W2Qo$bbC*A zi>1&)Trq?all>tFb!3mtqYZX1?Pb_c?V`pedLHSp9Ry>zIcGwMmL4s6hl(GtP_or8 z>(p#WU^Kamr~B0_o8J`3h)r>}Ij2Vn+Y=J5*E32hFQEvruhK)~;u6OSLkgeTkEABF6Lqk#iqeC|O624~9MS(E%UBZUB|x13yOEqY_^mKw217zP;G9vn-Uleae67) zAAI=yE^)4y-^76`ebLFK?k(pCSKc|T(Za~03ChHR(;Dhk8h0HBrCSw}E+$dA0R{b- z3S%i#qG6c9ICw2BFHU1DoMwWjWvXL8F5o0XEkjU%Ny^A-M)fnli#jt$92$WVJZyMJl(~M^1z^8!m$>=nxabdXd?%N0t3@M=cHT!@g|gmrtJ9vavSUWoo&& zpT*-n9~=Md$J-9t({B;JrHE?WJD89+?XaVgPJ_-}*P1GmMaO*Kg5eH{h3cK*$Mh|( z@vp|(6-}eESf_L%J>%N*NW(>*ybVcDax9++mBeV#P-Gnvh}r2%7O~tJH%JJvj8md_*)Qyw33g3CjG&8r&9N4>kCox|FN`2E9)6uz71uIJ zd&B=g;4YV6%E*3n=dgN}K-k=%0QxGES|k1+z3NU83KqZv-8q{q!PG%bj#nt7y8o14y1+D3=F4(iQwW{0 zUE(u3#~&KOQ!k=)%ZtvcQCff}&uuk{r(wi=yf1|8tz^DGDfjwGHpt-Yf4Mn|n1Q=Q zJl3KuP|(s<(uxbZIU*5PhR^qZQv zi0xB-v;9|VLy7FWc0vrir}!^EO`Li-6^XIlx884xF1JXWiYIn>@bUYbNS^-dw1gj0Bj1UcM(fr?+NLJH%2 z&kUwGKdPZ^Ia3p-A~PYhlyraejoRs;icbnMcZNfRh8laG6h1Dx>$UCF@%im!v+aV? 
z-WUG3lZ4ShuP=XLcDP{psWKeGt&)HAG%Nw;`Zg$=sc z%G}34Jwx}(Zb6A@fz~~m^@7{$0~==S3y1zEaX(yt-u2KF!Eg+u7>mq&5h-EaQy%2H zTidjs_Sqg0n{`K$rzJ(6kMKw3s{3ykt(;XQny{mz7b9aUKb*PXwUj=E_choa;P+#UH*!<|-IEosjUzQl{|S|USciXsMQMXNp~ zE>1TRJ!%x0?kvN4uZI@74p3lcQg2}agxrbi#AA7Fh`#AiAg(ho{mKsW(vXXBKsT() zt|gG4o@%{7Y%Rt@OhxIp^q9Dosk6x_kx`3h$Gc_YK#ZeC5 z`P_{{_7aO4w5D{#{=|&Qoqbkj%dogktMwSsYOI~)*Y@U6J5QnZE=xtdc2Zssjq7oY z#ykBK>$5joViJ7_4W44h_dbj4%%1#pVnx}KP-6XDVim`zYpK~M#Zm7z(jCukOCe9C zm)N8I!~;iG&*H>e#lG>f2E~InOS9?lI;O%uMs0A=F4AdWBKgf-%Ir>^;M0;-&5`>xe1)yuVY=_kC6#`^kEp8O!@U+HO+rY;i93 z#5swvs(vk{QTYK&s(W8O2_)7_L(r{Uvu_gBjN3ZP>=u$7rOs7mEa*N#pVCLO=1x4h zK!pw8kXLoAT|M0!9#gfh95T($BEQlRioEjq+6WlWS~k&67C zHk{U=ApbBvTu{ho_r3&D%P0?%e7OGGHxiD-Sv=Yr^BW5#Ic@Gb+p#8a{D(8#iCp_@ zX{gWlV9}&Snh^iOPr$qTx+FJL7khGaZU{#_zgoUI^Cwi%Al^%E9Jp?asdB%?A3$@VdiU32yYOYl;Wk|=BII_U z-b#1i0sMDa6M*sStJWhEpC%syc5+_`w=#l4!g=xvGl@>F?hPvjk|L5%RYFP|=1yER zOK|6oHTdM_BbJ?qQpVX)^r9HJpL*zhLA>W%>JnD-UjQQMl=%P--j%NqM!KwKAXj;- z@Sh8h@V53SmRxlhfpDnVv7$An(EUQUyS1`1TI{y)T`!N{hAO-WG>`>ER9xl0E&6+K zHPz!^B}RETJ~y%vx#(BiRk|x@wI~Ayuj4qMDlC-?Iv#Bb!?tB`RmOLVI;Y5wnaK_d zOfN}ZG`^A=_s1WOO^BqoR1Pm{OkvIE3*=pCd@&P9bTkG##epm>yFavV`Gz8+C6FUN z@kAx7to)>-)!}_r@@IoUK0DY2_pH#1ySz3TpqF}3g2=uxFdm$6OrxS~Vw22n=1%i= zv$6&VW&Xho%M!c`#96eFKQjsow~QM}6J{ETar&yvJs<*F+~;~f?r*Sl57h3@To7>> z7BlHv1}+KRcJ6ICe&0R%ACc#|K}$)qND&Eo8WAd+?^#bGU%>_7&t&+X;1+1-+{O%8 zB@X}yT&WcvOu=?AX=IH6??}n~$IHYHMg7D7N;>BCNmQwAb|B_ZN>$#CT6T2AnSo zQ{lYz<{L>QUv_&p#8Bqf_S%=j?$MCrhm+6Jiw7RYL~*YmG`9|2A@xfPsIL(w8AvH( zE9YI4-z?1eIGjmd_twL)ql5HnU}>6n(@GaW$7U<_;AeQ0MEi%+&YQrJ{3MKc4}(2S z#%sY;>SqUwoz9nhEd}0;xTG{L`5hk~57GF%GF=c5%Ok7Dr6$AKZ=XmB4x`!@k@GMpft`M9IO6_5cB9vPUAvDCA5 z8H35I!v)WCk9pks>2*yBCzw{#t{U3!o2(4n-zyL`N(#Xz0iM-wXmz4tgsbr;B{x?a z`~8NGgv@p+PW@^7k}39Z*|BdWp>apBpTE#(^Yp>UitpT{z?wfYjN>&PK={ao0!ha^w^or`k_m}Y~ZE~WkHw_g~wk*~+_2dhs zpkN^q_Kyj-J{M?BZbLz`RzKr)+}Oda-#n6U*noNs{XOf3v{)_VB=vmdST1P+X7^!d zetC1kw7jafUUAK4*HU~Ivft*yh?E0undB>9*n;n3-gg~kj0x{AxL)=o7KJCUHwNhC{&bM_ zN~-(l{9eFGqUi8S0l4N!aS1?ffdxOfKe?hhm`nD420;wxUuKisn*Y{9UcM$dD^cZ|jm(k#r+oVNhg&qb%WOWt%^BKQGCqo>SsWZY z+$>wpiaS*=kQ-0oPGNnt_Smlx*jl9 zwCmrIxAoCAF2@E-77!~(7a(ecb8>H-k%IQduN2oV=Pys|CSmLa69nFq4K%|K7V~`B zSRU19zfD`na4EkS=HG$i_f1XIQx|WczGhYHswpQH2k$&@KUpo+2Y)efj|?^sHg78~ zbv6&2o6_bNnCc`iVPW3G*(&@4>QjG^zse!d`lAX|k4_6UmoH6$K6|UY7E6D8U%fvN z$khtXNHKMZHJ`qGqY>XmXUWisF#GVf@A2+!H-LltN4fXc*#{#|+7X|v5ey7?nIsoA zH@ysx37x>5aTX~=a}m_Tkk9;uOULr;r8=QK>fma%ZW6m>Lh>9oi_Ar?o z8HCEo@6)Mz9yZQ;lMiAM{;k>tG2td&Fh*D&j+8YY`pz>|cy!kQL(KmI%zs8mrG9Gq zU1!fQH=Gn}On4ofxcKACD<2t>Q90R}dkkrBt8O0G2%L78X5XLub=~7wRZup&ykL;D zaTWnjOEGQ0v)JRmI7UzI!3~5tl5u!siVbK2!{DL&xZU1ciO86bjv0Se!cfQXfIcY6BZnlgK>p}i9tyQ zUIX*IF2+}qvLuA(BZ>!X;(Hxo6>n?=Io;IPjuU3nEF>VOrXUbM4i|9Up{1puqp+e7 z{&uzhzRJZ~MH0^0c|^G3x81;Oc(w^Y?mkGnhmQ9Aj=GPvmM3LROrlmMgoXrT*f-gf zCIcQ4F2|j_IlK`UaOO+W_NDMTR80zw2kT%c&SdbtI-w+&Z1(qEsS=f@4?8bZ|9tAT zRgWu3J+T3mgX@ZEl|e*ekbmUdc|6C(-n=}hz`BUuYg4kFO|Q88=}@^pl)PQSjAV1f zeJtMm+$2APM;2YjY**@H;3mYf`FbI=CQX?S`Q!_OG6$y6Nkn_3<<31!22YUsH#gj<^uvC{PTRvUL`k8eJDgtcC5kqq3~H-^ZL~_L+X?`7tY@L z;V6~z+WT*_42R~o-f?SQYQkj=ho0=a2wo$4Vv+!lQzzTQgW5QSfh;`Oqi|qU%Wh^GUx6vNS$c=mAv`zO<KhV?R~Q_^6+|%+G0vkf}Sob#>z7oyLAn^$Tfr zNc*)BPSrB;{(oB%7CcK@m0L?9u=R2< zX9g=#w-|9HRVj{BtdRdC-y$cW5TdwN(a97L>&-!+Q%^Yl<$8fbl{N+OPU_O>n7Pj6 zIcLl5v>Y2yLlmW=2U{I~Sr2u5Oi0Q`#e^rvHQ;m;Pq;NdI(}&>Sy0Tr^k5-AjnjaaF!!&j;_$M$#^(30dCwX=DR!iRS)}JANi1S{L zY2hj((Hq$x_}eTQY?&@R$6u1s@aa!apvWC|(syre@>gOfCr+5w+cSF{MAM6QDNt$Z z)YGw$Q_KLDma`h5;JE6l9vWW$ zOukUz>;INQAX13IvDNC$k|f7QODwN;=N2p5zgqPYqgrD~kZ&BoQ`6j}WQC^u${Bg9 
zTPAoOJ{PmtY)I(@>Z)~jm|36D%A0!JXdjo0bW&l^sk%Ma@oY+`{n{hCi8U*VOYadd z-oF~APan}`qjG}P>ZoW01*KR&A151pCS?7<1GlmJr9#+;x>oyL)LT??cw&E83y;nW zxdpwVv)K;m(5m!F<7)3P9sUaotwBu?_+_>nERVwDQ4Jmh>B4Q87=`bH7w~HFRnxrKk;tQ=$21K^-?a1u@V5beGXdt@l&&^B5 z)o{C>ZguvhBM_*V_*JMAhJ59}@kU>=mj=JJ~1 zg+D|k4~P1H4*StUeL=l^14^fUU8Z}f7&=F z{9&J=Yg8Cf-4NA<0o8znB2a{RJ^uAD2DwP1T#5usGRvKM|EZ(Gr(;$674gggadJm+ zFYKn|ch=V|0_+`ZEL$)dxKuZlKy^*YE~xH0>iA}a*ma+|t4wv_ZE}X3hIpGd9PI+B zACaee;+z@H556w@I)tKL(DQxO`gPh{Yj>`mZQ`fg3mJjiXC^mYEuYx0o|u(0304$C z+6eI{A89_)cI!6Y#lB!DpSs(z6&EC?!g!V)5X*JbGJ?U+yANppo{a+@a@U>l7fyg! zb_q}Hczj@6pD=nT@aaW_=EEO}{K4iA%K6eIQID+CeK5q8`=N9X z-Ou5RWN@dIOZxK)-QYLpB>Q-x(0uJwlWjaXUjHXw3WL@yD7PyGcHo-Zh6WOr>+AHM z~G|wtGWATi$BtH=s(l{Cun5hc{d)00Hf>vy!Xbs(@K*O5s z!zxXs5=KR#bDTnl=oyaC`Z8pOE*gqt7jo@v9`20hCw&(#$X^K_0@tIkZ092RA8HzL zVWx+WjtAN^?YEa9ePf_~Y-wO3zmEG}&u&VzjcDnboiV|kf52A(ZLD=wDZ=E@jMv#T zV^QRk6x=VNEgCtd^Ao28Hzmud{~?vN^Z2CP@UV8f{x5ht_mcs_MOweO;Y9D2y}YN{ zL_mIwg8MYfX#t9#BWL6!H4elxC$+zDgH3Q7TK|pyobclCx8cR} z=nBE3gNAP$5fmyT%2?X9c$T?fHkM&L9YV7!K7oXS8W~rvaIyMMb`pykbF{r;sPe7Q z3ilwni5)Lft+5O9NRC?_9Pv9k&>0|f_NNFG7pG(jOd&?$7dwDrtsMK1YeBbQcAUr~EqDht*j^86S# ziADF`#fdKM-Ua0|ax;DEUc6*bOwYW0Yk!r6tu8nKl;|&XY~(eoutn{L!&gIBw&4Z^o@#}n~tz+zLt7dJoZ8g zdBocE%xwxuzqRj82`n5_Z<6mAyxi0A{qHI_pdQtjXM2K2uHx#81tMq>KTRHM$N#v^ z(7P1I>{~uj*T`5rKgm0^O76h$5HJ+=vCv8r&vM}|BmK9xeJW|%16_zpP$J=s_vsL9 zvj|3$0&sL6HLitk+2sc-sTk@8_mTI}bl;$yJSXU|rr%^sgZnJ@njR%LjXG1+btz|a zg_v0=nEw~XYGRYkA`QM>zqsb)l#&!E)-I$6NZVXm@qF|Ry9OZ}r`*ZY7sV(iNSYX{ zg84a0GPF8b0nFX5Fm4^Y7ccpvyOc1u%vY5pG2{`!2@2QR!>@`BaR zA72gyY#Cmn9A&c_kY+D6^J|00i$aQrF&@Ri3gfJUlHXA30ydPYmCnk9(mk?#O&)%kX$AGG?@Q)+Ff&z|dNSV}ldv5Ej{O@=4e}AL7h$4S# z+MBDliRd%~#SkUA=df;-6_GtTcNgBht`zY@Spna<)90W$s{vmPqe;7yD_5b|A~Nn! z`CV>=L)oTa3dr$Br`uZx&$IW2+PM6~cni(BHPWzT1j&LHp<%*x#2Dz{(a^2yLoxb@ z(4G4Y?2w@VLM9tY0TNbPXAy(L*DgPOez9p@f^4d7(EIS;tMd1WHQ!GnWeePs!J2A3tH@!t%}$>P~B*;w~z{dd^<3mbh$ zjqBNcJ7@MC%n^05)k`04PaT?G*ySHf^_+c%-kC`>Xb3$y!Y*Q>L}|6bsqMP1{31`> zKKuFK*W&M=Z&3y?`)+@<>Q1vS8+M6|Vv-sJXjO>)J8oJLk|=+zjE#wmd2d`Vx>lLp zMZod-_FQl5(w{a-{B<0%0kFep&QwobAj9{9Z_O};a} zGl?c2MD0Pdf4;|RDYpbhw)mDdpSb<$Cv(cBIu^6v!n_ww|cvrIVk?o!!nn(x>jXERTwp_U)y$90+!;zby1dWlU zXPF=2`23FQ?<@OXpJi$Acy8wwAnrR3qLn$cTcej-ksE|rPq!IUn zL!;f<_5{9SAj}8yp=?YRv41Wjo9i(00S!_0L)j95>2YaB03NcEekc5|Ed6&XfF-qV zeWv{Y*A!ITWw1!?y@sx8N2g!@f>O5x2FDCGW;=B#tSOuldG*X6%N&-62_v>Bk;S#U zJ@5A?%tk8R%T9_83;2lc^5Ut>GLNN(lCc`2BcwQP!Bos)-aL zPDaND0p>$fJ13~s9&8$HVX9$v(hk_G(6@FltReOKBT|P33x6UBV)?;_18cZI_@`%; zrr|IZ5YzHssbwD`Uj%>;9M zYmHP}B_|Hl_er~}T@ zk%~*Tz&5|4dA2)!#SDeIT7Z}uiFwr%aqB`7bfxWVu|N18XE)waOBoe{`kcPK`;Wd4M_l}Df7)jV; z`wKBmGzPj*4%-36r_r6!&8EqZJa0gVE$?^m^1mJ($A(*F0_2gc*G#;Q9m2ChAMzk& z0BZOlpw8$e=3mERq0JVpvg*^f&C`A-`zgpe&9gbb_V7V696w>3&vl!_t{U#Taa@=a z=MQd@;=!DCd!eKK`QMa+|5{G!YdFrE;H8C`Xb3|FSh2v4ZdjkC$bSJ93qs2xnZNw` zcpH8r4X>^#+KDdpzV!*Pj7f@U>ZDi-3PAi$e+w_oH#C2A6Z*T0#0Rgj+Fe&9EfyUY( zM1tK zc0~~~(>Z|KpkX@I06`rAFv~gBj+WV94U&S_V){A`D_$HWGWcmVb7`pTeXTymuA)t3 zSCQbM9Fz)zXWxnAxpOHeKb@3F$G!L*+_-jubc!N3~WhO>zL}%sDpO z)XQ$ztXI)RNNjs9>R2;~zcR216Er2P3ZWpwOe=$EQ3IwbV=&w#=@*WiWLtj8p4{t@ zvh%d|2P=NNzLdBGgHQ-?&0G6i0Fsfs3rCQ7r4K;RouE2-y1?6c`kuK12-l1h7yX2z z7mPl9ul_3v2Fv%`6?fXf0CO3_!1?YiS%VktXq(S`5L{a2PcUdsrS18rvloBOzkmQK z4Z!#9D>9f12`SslMD*VGPpG?}-)b;FrjJy#)0&hspK6ZU=r1y`D+~c=kRHf zDlF~Q=ea8czw%WC;yO#G9sawwgDoX%Nu>*(7E&-lC-UH@4U*Sdo?R`l2hY1-%OgWb z4peaJx^mp{5`c3mS^e2g8GgL)vJc(%R?B@YQwN>GWLxePp`Fi+iOXX3+X! 
zUf2xMo~U^Tr5_qb5r29Vdekn1Rf%U97t8C9mWf7IK#IO%LLjw;F*jsV0bPen*pD#(6DQV3POse)9d>H)E{4wI zR6MMF#^f!tKdn5q9M1azu&)||tDslb%xU=zf4n5VovUlcXMia|Ac()jD-Sv|wXy>V zh8`A}SU!}cgruT;2Qke1{UME~lJz$#x)afo#3<=epq4H@g5>=inCLDArc_;W?aE`} z$UZwOa|1~SXb$cnG`r)g@}g&PKmD{JCmQtdy*T)N0PsgOQtD#to>jxBgUG$_XA!%m zB_!6w9GAcr1i4}RzeG##+Sm@3Wam|HE>O}A)oc#hZZ=YB^aBpE1e~3_Ani+Vx4Rxu zw@_UcmmACXrP!EWzJTU3#Eih9c zbB!Ug`I2!}1Y}2KVXIJRKG2gyAyKxwOjjgo%X(L^g)gk zOssDL@!C&o;%%0vUs?p?L2Ur)md6)bd6w%uU{gm*0S6sJ=MWj$N17<_vZKR216avi z2to4$;B9n05We=OHpfr1{$W3@!d+?ojS*BQozV#PU3bC!6GDw5B8+~Z%B|=f%FTGQ zfcx(S$Dcb;|Cai|{O4`R{aYzg0Eb|os_+-W78l&aH7^XAS3&GaVFy!S(42pGV!?S_ z$vbNLz?j$|0>y`Sj~gl z(|8tfxr6&xUqXsvak%NjjmIem$5HuG(4&9Sga2243@VyJ*COJ+Y)ED)aQ}xUnZ0JM zuMe4-6XKq!P>rOyt2j zK@26J-+7i9!}|NLQBwl7N-iL2gbZPN0n00y`|;}=6{94Cqs+VcRvq1M7B&EKg}b)z zQ+Z900v|n>>OKf|jCR1QmRfZ_*Mo64Y?4KEA>!Q`E(e1!Z`4^T!V*8s6lwfE;9Pm3 zPw4^WfjzVxOhTJcqN4)l-E8@rh<}`M1a0M)YxH`v3UwEGDtq;9E3zWRBG0xfG_Iy` zz7A*{P}Se9y@m8}Jqb^Y{_svgsBz&+=w}cK&X>v(e!Tiz zBkfG!t)8p^=$L!!u4(_xSmy7Q=!^42EFFl?+$QWyFQp*xJlj127t3Nu%?2x@)p<}B zU?4)ZoG5_yag`hu2Y|DIE_7%X8`_qPrH&M5X&{C+v1Pmbp}b9?+J;UW3jw$k5&toH zPE&XReQLivT!Deg$6yF!kFcDFqp)eOXpQ+DRj-W;V35Pr@>U9p)m-(|%Mn9A2r07u zAI-le8v(iX92EW&B&fTqD}F~q4fJ-EOPJmYS8IqkQzT|Ij{UDs)w4bV$|?#ea_Oy^Qf+YUItMVB7GSc6If%wwb%d&pg_4| z09D;qsDttUR9yuMck9mQmf*sf?KK7{$>ZgS%65Jj3?XE^12wY)7QL%Qa1RfjDEyLI zoA7r96o89(c#Y3};Amk1TPp>uyF1~6$+GzZ^d5`#ym)_guiNAT7{v~dI@bB)9zI(k zw|gu;OZQlN=!2UrQHl0v%%J5A1*9U++*0m8JcNQEuwWOFJtBrGP-Je26(>7<$PQx{ z+w8Z<;LU{xWQtf0Jkz$%iSB+U#$* z?6xpXMmglt&iAc&p&0eu13^N(sd55;|ALxmyzEi&&Uq6rejo@WTrUtKkBV=;!N2ncof@dlAOHNXGyF@(6m1K?`=wc)fR zSX}&78Gt{~y~bxtqt%taetc;FH(qx#YuVv``L7VYY*QuNF^GzSmVeUWrRAw2nHlN; zEiPEGI0Hq@Z;(W!$fPDm(CQ^9smh=l_sAW9ZgyB?*hyh(!o;H!4Mhr~gUltvZMPJe zpzwgnMSFzE!EE95{7y3nG2$z#BVA@J&2#qjV<8C;=^h=^7(n1?kTjS)7K$Xyg?3QZ z8Octhit%i}{~mi1>X5EllE4nK+9qP6vGBCk&Aqy3FgT@@uf^A04pKEHI= z9EvdC-c&zGzRR8xRvmQ7V$K;p`)e(K)ozHU6NH2XGMc=$xVzw$UI%$rTk*Gb%2{PR z1o0Dw)5T$*6MZdDx3Io}7IAGr%Ac*IrXe=~1l$GVosI(%5o=%a<=QfC@-0OIVfzY2d6TLhKP@ZZVC9lr8obS{S`P0HbEZ?ZXzCL(m#T2qnKK zSJ#97B}gqv;3INo44?*(QN?CN^1G}U)UzoiyaZ9X*RDgkEj&2mm(52wz+<)7^K7Iq&^cjXvuc#4;I$$)hRkT_+ z({zL={VmyH$J-5;#Q(n6iDQ6hay61>03>a8;PQ-|st4r($d-qK4266^4W@oS;m5VM8rYicngXam2qx}L|hG4CN& z2MX`%12Do1^-)5|bIczquJc^7VnJX6)PmG4NZr^8!3zQY$lwxMka-JK9&8Q`LG7RT z?VB4wHHbYInT~%V0&2t?63I0FrP{ zS)E&q)sQZ9o@yQd`r#gum=6#^9P})O!{~Q7utoOaTy#E;b@@XOkEf7Tx*E*cg#}p} zlJ@vJ@*!z2|7ydp1I4~0%!53(hgnkbrG4m1kcJEcoykXsiWwRt4tNa~nL#DU`FWNj zi-b*660%Zcix%e}p7n)@ylor_mS9rc09f&KTE66u91!5DrAA@|2aAa;K6Oq^KjIJ{m4QodY@JFoP+Z6!A5N4NJ9R4-V7|5AyrjDU(TsMJ*EQS0yHdtzF4d57( z)cXPHw)6Nldp%f4HpvGnbu1KC7$jFKu#@L(epVoJb52U$BHH_3kdeA1#};?@n1B z^4X{^w#};5{8%_&VvJd z+XM-m-~|srS9d{bdpv0Vkx-Ws8C-C2p57lQ%JY1c*dot{ ze9Htqu1_u~i~qk=6PRr5wQolTOUj`4`;AD+0FjLeCAzfQPg;H3*?`} z!;A0@B|?1xB+rW&?W^TVD}pZ9N8hzmnEOQWQgHONu*x`faRT=; z#KUCK(=Y?KA^(DLdNcYPA)eIL7dm)(vFjO8r+&Xt|9bgVnadH}QT8|h#%*H-y?_>b z16sF>wr53f;86!(p)^aD-gwqs!k8fY4Dk;w7CdP=$REffSwfc-d9aFk%D3ru1h{NWhYCSNcfYr>;zfJFAD!!UDawk^pD{$UP zR?tISYC3hZL|Y4!kFLBH4w0e6JJu!6^G_F_*RYhV_%!XoLN->8rx^?TI%%pS`m_)iA3PzVWl>RP_X{TE zR$6i606bU32)^*vOw$_nj(p7Ey}J-URDdE>ucyC)yzE32ylg||LH+I@U)GcW+X73y za_8{?7>n^C7~4E_5Z4I|nDUWrJP0@Me1o1h73|qCKVOphWLXfWLi`(50)|hC-|J-v zOzdGgEIygGMz*PN*9 zO#u+;l0Uz4+uUA5fsEV@poh^C;G!%Aljg9{`6gz*F#tIattfy}^9A8m5r6#Fypl%_49G9)S)QQB9)$)2RuH#JRBv=#LkT74-9i_hIJc z4U{y}K3EH86(sCDzxOWtD|D@~B49LC^7E_8TMh3?DxZ4-Pd3?^ca+i-PKO-B3#j|7 
zz(O$ivX9H`#-HAY?8cDLcAsH(|!J?+=G(!a8?YC{ewdZXo!fj>ASuLNrD5kad-7cRC%vf*0}}gI9u>1n_ikK$#-t)yZEz9_Xo1fR?iAS%>|p3nD@V`?Jp*WsSEIHj2Qc&WHSVVRGq;n;{`Nq9?^xm&AlsHPHLY&%t<1qWuMXfLCgzc#Ak-nC zKx0l+O<eKxNNu1EeD3K6Cx`Upx2&o)6b&w5Z@Y?nhg)rW`D?jg->|3e3BjWukVX zHkCftM`kX}oHw{vs+D6X>J=~(A;7%X-{La9>uuV}%+rX>UK^jg=0qN7HF?rY3f(~J zDZSIGVO69hDri+)6&@Y@oki5mvRJPMgiTVl?D9w~`8K|bE%17`&Y9B{oy{|}GI{OX z(3H}r@79o%Gn3A`LS0f}C)N$kh8iT-tTt%Eri~H)p53*%{kHNL$i8QPhw}#>^D#`x+4x#R8^!HJuu;lX{9DZd; zCcSOyLOO$FG|!tMmnA^V5%jYTpX@a-b%Mm|SY0STZE11L#wjjUjxFS1$*JsM+vQaA zQ&?IyG?j*XH#CNl^}J4hz|cA6;MAvKe?kzrKAqy(vC_!>-T3tc;|KGv`4*pE9wgFD zZje1n@;ltwD81SF-gp6w-@9jQb&gCx<-Ad<*=ewAoc(H1o6+-bmZNa|;Yak`m-mUf z<`IL=iUawyn7ZS=0Kn=>XF3=<@gY^aI6B$2L631U|8(CDC`zB0v|S7+6k3Ngt;>^n z&sBn%eh(Zv)`k{#zacY;!&~2P_VYHqE<#8hsUIaN;CJe4I;dyi^b9QxEoz^&He zf@;YZ;M$Yh^BaPUx*YeZUlkY%xg97NTUK^wTYuE3{QgTym*=M6uYv99(TOz8#Gm3z zd4@~5N5Tya!ZT;sU2Od9@nbLNrqYuR8hM>jFkE!=RAC&ru^Q7S=(faGTT~j!kxnbQ zux_ik)MZs`8*8Oep0q<%>Dk>_UrC-(M4uoYf*#i$+la!m4`ge=`j9Uu_AIdu7@gZMBpkxs;!##e>}+)kV9bhHOIWEE#rbpRujh^IK}&nM^`5@M)ClC!c6@M7&%zeuh~WnJlizTtIvmRv~=SEG_~MyN8ai)Gm7 z4@5G_s{6iMm1A!ps)=i)g)5ruPtw|QpYDoc`Q-St+4wOwiM)?f{u^|SvC3%?pP;Nw z2L>}wzdqV~|A(SXThiG`0T%U}sY#|^>q7y^5YsqBF(cIwP4T-cG6 zn(Phst-tO%iX!R-M9}*KfKMC2V4DE;mJ*RHJw-FsKt_E7j5@Jh#0)MYl==$h8^#g= zg?e>hju0P0IA?h0$$-i`GJWOWaET7x#`)kJ*Mt4=D>B*zpn2y!Rgt}`_ zFH4gfZ$QI$JvKPyc5ID12nV?<2NxnynXX84){|R7HiDE(00FgAu`ew+=fjk4*6nTi z=Vp0J1BIXEiQj-M=Tc;ecCOK5xFf@1Q~hkLhW*)uMB6+;)@gIURF}4M1EfbV%Jp!b zP-a{1j5hmE@=9Ouzn$H5Hnw*Kl}jc7-|QMSXWB(3zP;BFn+Wz!&0){2-Kt&@wo2X} z5H1G`j9FVcbiRTc*X!GhN>1Bd?lrNZxdL3|DsV?*ouo*XtA%;k;d{PtQS9e*$=h$E zqm++WpYKJj&iMH()7EZZrIDoH*u_2%KyCy`7#hCPDvaE-sCIGo2bIW2F)*xH7p^0X zW%TLEP!^s~FW3c{YfQ@xA8mSHDBaSoB)NB{fMD8ligc#P3*=a;$aoQlIkyDYeWEA% zu2b)>*c8o&DQgLT?3jw0Y&bV`MR)Am+jq(XI!12Vmtnrig?Vc)l-?n!28zJGXrJZN z3rd%Q85u|+kXIGlNOh|0@*FmVa`Ck7RH&eCrB28#G@J;j3SOUMOk4$T(;nQTnae>p z=+gtmKeNNl_UQ7Nu^iqNVFm>y2MJw0z{A+P-Lv^n4;T5^EbE3wWAH zR8mt0o*S7UK=~f6vHYc(B-XR<)m2^(T~xMl<+6J|twqqHBBPNIBDUi29PqrEsQKbO zjhqXJT~!dWk)sc%q4bwBcWT^wcl#5BQ=(sLpNHRHR@|)?5&r+!I_s#Y*Qjk%>L7{? 
Date: Fri, 6 Sep 2024 14:27:58 +0200
Subject: [PATCH 049/128] (fleet) fix the run path when using the CDN (#29075)

---
 cmd/installer/subcommands/daemon/status.tmpl | 54 ++++++++++----------
 pkg/fleet/internal/cdn/cdn.go                |  3 +-
 2 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/cmd/installer/subcommands/daemon/status.tmpl b/cmd/installer/subcommands/daemon/status.tmpl
index 714ed431f3214..045b819d53764 100644
--- a/cmd/installer/subcommands/daemon/status.tmpl
+++ b/cmd/installer/subcommands/daemon/status.tmpl
@@ -7,16 +7,38 @@ Datadog Installer v{{ htmlSafe .Version }}
 {{ 
greenText "●" }} stable: v{{ htmlSafe $package.Stable }} {{- else }} {{ redText "●" }} stable: none - {{- end }}{{ if $package.Experiment }} + {{- end }} + {{- if $package.Experiment }} {{ yellowText "●" }} experiment: v{{ htmlSafe $package.Experiment }} {{- else }} ● experiment: none {{- end }} - {{- if eq $name "datadog-apm-inject" }}{{ template "datadog-apm-inject" $.ApmInjectionStatus }}{{ end }} + + {{- if eq $name "datadog-apm-inject" }} + {{ template "datadog-apm-inject" $.ApmInjectionStatus }} + {{- end }} + + {{- range $remoteConfig := $.RemoteConfigState }} + {{- if eq $remoteConfig.Package $name }} + Remote configuration client state: + StableVersion: {{ $remoteConfig.StableVersion }} + ExperimentVersion: {{ $remoteConfig.ExperimentVersion }} + StableConfigVersion: {{ $remoteConfig.StableConfigVersion }} + ExperimentConfigVersion: {{ $remoteConfig.ExperimentConfigVersion }} + RemoteConfigVersion: {{ $remoteConfig.RemoteConfigVersion }} + Task: + {{- if $remoteConfig.Task }} + Id: {{ $remoteConfig.Task.Id }} + State: {{ $remoteConfig.Task.State }} + {{- if $remoteConfig.Task.Error }} + Error: {{ $remoteConfig.Task.Error }} + {{- end }} + {{- else }} + No task available + {{- end }} + {{- end }} + {{- end }} {{ end -}} -{{- if .RemoteConfigState }} -{{ template "remote-config-state" $.RemoteConfigState }} -{{- end -}} {{- define "datadog-apm-inject" }} Instrumentation status: @@ -32,26 +54,4 @@ Datadog Installer v{{ htmlSafe .Version }} {{- else -}} {{ redText "●" }} Docker: Not instrumented {{- end }} -{{- end -}} - -{{- define "remote-config-state" }} - Remote configuration client state: - {{ range . }} - {{ boldText .Package }} - StableVersion: {{ .StableVersion }} - ExperimentVersion: {{ .ExperimentVersion }} - StableConfigVersion: {{ .StableConfigVersion }} - ExperimentConfigVersion: {{ .ExperimentConfigVersion }} - RemoteConfigVersion: {{ .RemoteConfigVersion }} - Task: - {{- if .Task }} - Id: {{ .Task.Id }} - State: {{ .Task.State }} - {{- if .Task.Error }} - Error: {{ .Task.Error }} - {{- end }} - {{- else }} - No task available - {{- end }} - {{ end }} {{- end }} diff --git a/pkg/fleet/internal/cdn/cdn.go b/pkg/fleet/internal/cdn/cdn.go index 53d93568777b4..3689ee2e67f24 100644 --- a/pkg/fleet/internal/cdn/cdn.go +++ b/pkg/fleet/internal/cdn/cdn.go @@ -73,8 +73,6 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { // HACK(baptiste): Create a dedicated one-shot RC service just for the configuration // We should use the CDN instead config := pkgconfigsetup.Datadog() - config.Set("run_path", "/opt/datadog-packages/datadog-installer/stable/run", model.SourceAgentRuntime) - detectenv.DetectFeatures(config) hostname, err := pkghostname.Get(ctx) if err != nil { @@ -84,6 +82,7 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { remoteconfig.WithAPIKey(c.env.APIKey), remoteconfig.WithConfigRootOverride(c.env.Site, ""), remoteconfig.WithDirectorRootOverride(c.env.Site, ""), + remoteconfig.WithDatabaseFileName("remote-config-cdn-tmp"), } service, err := remoteconfig.NewService( config, From 9ccd5390daf91b8f49bdc2b9dfcfafb6df1de0e4 Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Fri, 6 Sep 2024 14:52:41 +0200 Subject: [PATCH 050/128] discovery: use executable basename for service name detector dispatch (#29048) --- .../servicediscovery/language/language.go | 75 ------------------ .../language/language_linux.go | 79 +++++++++++++++++++ .../servicediscovery/module/impl_linux.go | 2 +- .../servicediscovery/service_detector.go | 5 +- 
.../servicediscovery/usm/service.go | 40 ++++++---- .../servicediscovery/usm/service_test.go | 58 ++++++++++++-- 6 files changed, 157 insertions(+), 102 deletions(-) create mode 100644 pkg/collector/corechecks/servicediscovery/language/language_linux.go diff --git a/pkg/collector/corechecks/servicediscovery/language/language.go b/pkg/collector/corechecks/servicediscovery/language/language.go index dd4b224f9bc47..fdb16329d58d5 100644 --- a/pkg/collector/corechecks/servicediscovery/language/language.go +++ b/pkg/collector/corechecks/servicediscovery/language/language.go @@ -3,20 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux - // Package language provides functionality to detect the programming language for a given process. package language -import ( - "path/filepath" - - "github.com/DataDog/datadog-agent/pkg/languagedetection" - "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" - "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" - "github.com/DataDog/datadog-agent/pkg/process/procutil" -) - // Language represents programming languages. type Language string @@ -40,67 +29,3 @@ const ( // PHP represents PHP. PHP Language = "php" ) - -var ( - // languageNameToLanguageMap translates the constants rom the - // languagedetection package to the constants used in this file. The latter - // are shared with the backend, and at least java/jvm differs in the name - // from the languagedetection package. - languageNameToLanguageMap = map[languagemodels.LanguageName]Language{ - languagemodels.Go: Go, - languagemodels.Node: Node, - languagemodels.Dotnet: DotNet, - languagemodels.Python: Python, - languagemodels.Java: Java, - languagemodels.Ruby: Ruby, - } -) - -// ProcessInfo holds information about a process. -type ProcessInfo struct { - Args []string - Envs map[string]string -} - -// FindInArgs tries to detect the language only using the provided command line arguments. -func FindInArgs(exe string, args []string) Language { - // empty slice passed in - if len(args) == 0 { - return "" - } - - langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{ - // Pid doesn't matter since sysprobeConfig is nil - Pid: 0, - Cmdline: args, - Comm: filepath.Base(exe), - }}, nil) - if len(langs) == 0 { - return "" - } - - lang := langs[0] - if lang == nil { - return "" - } - if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { - return outLang - } - - return "" -} - -// FindUsingPrivilegedDetector tries to detect the language using the provided command line arguments -func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language { - langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}}) - if len(langs) == 0 { - return "" - } - - lang := langs[0] - if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { - return outLang - } - - return "" -} diff --git a/pkg/collector/corechecks/servicediscovery/language/language_linux.go b/pkg/collector/corechecks/servicediscovery/language/language_linux.go new file mode 100644 index 0000000000000..4149070faf06d --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/language/language_linux.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +//go:build linux + +package language + +import ( + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/languagedetection" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" + "github.com/DataDog/datadog-agent/pkg/process/procutil" +) + +// languageNameToLanguageMap translates the constants rom the +// languagedetection package to the constants used in this file. The latter +// are shared with the backend, and at least java/jvm differs in the name +// from the languagedetection package. +var languageNameToLanguageMap = map[languagemodels.LanguageName]Language{ + languagemodels.Go: Go, + languagemodels.Node: Node, + languagemodels.Dotnet: DotNet, + languagemodels.Python: Python, + languagemodels.Java: Java, + languagemodels.Ruby: Ruby, +} + +// ProcessInfo holds information about a process. +type ProcessInfo struct { + Args []string + Envs map[string]string +} + +// FindInArgs tries to detect the language only using the provided command line arguments. +func FindInArgs(exe string, args []string) Language { + // empty slice passed in + if len(args) == 0 { + return "" + } + + langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{ + // Pid doesn't matter since sysprobeConfig is nil + Pid: 0, + Cmdline: args, + Comm: filepath.Base(exe), + }}, nil) + if len(langs) == 0 { + return "" + } + + lang := langs[0] + if lang == nil { + return "" + } + if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { + return outLang + } + + return "" +} + +// FindUsingPrivilegedDetector tries to detect the language using the provided command line arguments +func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language { + langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}}) + if len(langs) == 0 { + return "" + } + + lang := langs[0] + if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { + return outLang + } + + return "" +} diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 7d7237c38484b..2500353ccc052 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -324,11 +324,11 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) contextMap := make(usm.DetectorContextMap) root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root") - nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, contextMap) lang := language.FindInArgs(exe, cmdline) if lang == "" { lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid) } + nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, lang, contextMap) apmInstrumentation := apm.Detect(int(proc.Pid), cmdline, envs, lang, contextMap) return &serviceInfo{ diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go index ac0434be492e8..a491837c1f7f0 100644 --- a/pkg/collector/corechecks/servicediscovery/service_detector.go +++ b/pkg/collector/corechecks/servicediscovery/service_detector.go @@ -9,6 +9,7 @@ import ( "slices" "strings" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" ) @@ 
-42,9 +43,9 @@ func makeFinalName(meta usm.ServiceMetadata) string { // GetServiceName gets the service name based on the command line arguments and // the list of environment variables. -func GetServiceName(cmdline []string, env map[string]string, root string, contextMap usm.DetectorContextMap) usm.ServiceMetadata { +func GetServiceName(cmdline []string, env map[string]string, root string, lang language.Language, contextMap usm.DetectorContextMap) usm.ServiceMetadata { fs := usm.NewSubDirFS(root) - meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, contextMap) + meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, lang, contextMap) meta.Name = makeFinalName(meta) return meta } diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go index 9de72147d64df..36ddc7e804d35 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service.go @@ -17,6 +17,8 @@ import ( "slices" "strings" "unicode" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" ) type detectorCreatorFn func(ctx DetectionContext) detector @@ -165,20 +167,21 @@ func SizeVerifiedReader(file fs.File) (io.Reader, error) { return io.LimitReader(file, min(size, maxParseFileSize)), nil } -// List of binaries that usually have additional process context of what's running -var binsWithContext = map[string]detectorCreatorFn{ - "python": newPythonDetector, - "python2.7": newPythonDetector, - "python3": newPythonDetector, - "python3.7": newPythonDetector, - "ruby2.3": newSimpleDetector, - "ruby": newSimpleDetector, - "java": newJavaDetector, - "sudo": newSimpleDetector, - "node": newNodeDetector, - "dotnet": newDotnetDetector, - "php": newPhpDetector, - "gunicorn": newGunicornDetector, +// Map languages to their context detectors +var languageDetectors = map[language.Language]detectorCreatorFn{ + language.Python: newPythonDetector, + language.Ruby: newSimpleDetector, + language.Java: newJavaDetector, + language.Node: newNodeDetector, + language.DotNet: newDotnetDetector, + language.PHP: newPhpDetector, +} + +// Map executables that usually have additional process context of what's +// running, to context detectors +var executableDetectors = map[string]detectorCreatorFn{ + "sudo": newSimpleDetector, + "gunicorn": newGunicornDetector, } func serviceNameInjected(envs map[string]string) bool { @@ -194,7 +197,7 @@ func serviceNameInjected(envs map[string]string) bool { } // ExtractServiceMetadata attempts to detect ServiceMetadata from the given process. -func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { +func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, lang language.Language, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { dc := DetectionContext{ args: args, envs: envs, @@ -234,7 +237,12 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, exe = normalizeExeName(exe) - if detectorProvider, ok := binsWithContext[exe]; ok { + detectorProvider, ok := executableDetectors[exe] + if !ok { + detectorProvider, ok = languageDetectors[lang] + } + + if ok { langMeta, ok := detectorProvider(dc).detect(cmd[1:]) // The detector could return a DD Service name (eg. 
Java, from the diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go index 38d8c390bb169..d3e4f2df2b6a3 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" ) @@ -50,6 +51,7 @@ func TestExtractServiceMetadata(t *testing.T) { name string cmdline []string envs map[string]string + lang language.Language expectedGeneratedName string expectedDDService string expectedAdditionalServices []string @@ -111,6 +113,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "flask", "run", "--host=0.0.0.0", }, + lang: language.Python, expectedGeneratedName: "flask", envs: map[string]string{"PWD": "testdata/python"}, fs: &subUsmTestData, @@ -120,6 +123,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "testdata/python/flask", "run", "--host=0.0.0.0", "--without-threads", }, + lang: language.Python, expectedGeneratedName: "flask", fs: &subUsmTestData, }, @@ -128,6 +132,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7 flask run --host=0.0.0.0", }, + lang: language.Python, envs: map[string]string{"PWD": "testdata/python"}, expectedGeneratedName: "flask", fs: &subUsmTestData, @@ -137,6 +142,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "python3", "-m", "hello", }, + lang: language.Python, expectedGeneratedName: "hello", }, { @@ -144,6 +150,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "ruby", "/usr/sbin/td-agent", "--log", "/var/log/td-agent/td-agent.log", "--daemon", "/var/run/td-agent/td-agent.pid", }, + lang: language.Ruby, expectedGeneratedName: "td-agent", }, { @@ -151,6 +158,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "-jar", "/opt/sheepdog/bin/myservice.jar", }, + lang: language.Java, expectedGeneratedName: "myservice", }, { @@ -158,6 +166,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "com.datadog.example.HelloWorld", }, + lang: language.Java, expectedGeneratedName: "HelloWorld", }, { @@ -165,6 +174,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "kafka.Kafka", }, + lang: language.Java, expectedGeneratedName: "Kafka", }, { @@ -175,6 +185,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-cp", "/etc/cassandra:/usr/share/cassandra/lib/HdrHistogram-2.1.9.jar:/usr/share/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar", "org.apache.cassandra.service.CassandraDaemon", }, + lang: language.Java, expectedGeneratedName: "cassandra", }, { @@ -182,8 +193,10 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/home/dd/my java dir/java", "com.dog.cat", }, + lang: language.Java, expectedGeneratedName: "cat", - }, { + }, + { name: "node js with package.json not present", cmdline: []string{ "/usr/bin/node", @@ -193,6 +206,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "/somewhere/index.js", }, + 
lang: language.Node, expectedGeneratedName: "node", }, { @@ -201,6 +215,7 @@ func TestExtractServiceMetadata(t *testing.T) { "/usr/bin/node", "./testdata/inner/index.js", }, + lang: language.Node, expectedGeneratedName: "node", }, { @@ -213,6 +228,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "./testdata/index.js", }, + lang: language.Node, expectedGeneratedName: "my-awesome-package", fs: &subUsmTestData, }, @@ -226,6 +242,7 @@ func TestExtractServiceMetadata(t *testing.T) { "./testdata/bins/broken", "./testdata/bins/json-server", }, + lang: language.Node, expectedGeneratedName: "json-server-package", skipOnWindows: true, fs: &subUsmTestData, @@ -240,6 +257,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "index.js", }, + lang: language.Node, envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes fs: &subUsmTestData, expectedGeneratedName: "my-awesome-package", @@ -251,11 +269,13 @@ func TestExtractServiceMetadata(t *testing.T) { "-jar", springBootAppFullPath, }, + lang: language.Java, expectedGeneratedName: "default-app", }, { name: "wildfly 18 standalone", - cmdline: []string{"home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", + cmdline: []string{ + "home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", "-D[Standalone]", "-server", "-Xms64m", @@ -276,7 +296,9 @@ func TestExtractServiceMetadata(t *testing.T) { "" + jbossTestAppRoot + "/modules", "org.jboss.as.standalone", "-Djboss.home.dir=" + jbossTestAppRoot, - "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone"}, + "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone", + }, + lang: language.Java, expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"my-jboss-webapp", "some_context_root", "web3"}, fs: &sub, @@ -284,7 +306,8 @@ func TestExtractServiceMetadata(t *testing.T) { }, { name: "wildfly 18 domain", - cmdline: []string{"/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", + cmdline: []string{ + "/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", "--add-exports=java.base/sun.nio.ch=ALL-UNNAMED", "--add-exports=jdk.unsupported/sun.reflect=ALL-UNNAMED", "--add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED", @@ -308,7 +331,9 @@ func TestExtractServiceMetadata(t *testing.T) { "" + jbossTestAppRoot + "/jboss-modules.jar", "-mp", "" + jbossTestAppRoot + "/modules", - "org.jboss.as.server"}, + "org.jboss.as.server", + }, + lang: language.Java, expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"web3", "web4"}, fs: &sub, @@ -317,7 +342,8 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "weblogic 12", fs: &sub, - cmdline: []string{"/u01/jdk/bin/java", + cmdline: []string{ + "/u01/jdk/bin/java", "-Djava.security.egd=file:/dev/./urandom", "-cp", "/u01/oracle/wlserver/server/lib/weblogic-launcher.jar", @@ -329,7 +355,9 @@ func TestExtractServiceMetadata(t *testing.T) { "-da", "-Dwls.home=/u01/oracle/wlserver/server", "-Dweblogic.home=/u01/oracle/wlserver/server", - "weblogic.Server"}, + "weblogic.Server", + }, + lang: language.Java, envs: map[string]string{"PWD": weblogicTestAppRootAbsolute}, expectedGeneratedName: "Server", expectedAdditionalServices: []string{"my_context", "sample4", "some_context_root"}, @@ -339,6 +367,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/java", "-Ddd.service=custom", "-jar", "app.jar", }, + lang: language.Java, expectedDDService: "custom", expectedGeneratedName: "app", }, @@ -349,6 +378,7 @@ func 
TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/java", "-Ddd.service=dd-service-from-property", "-jar", "app.jar", }, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "dd-service-from-env"}, expectedDDService: "dd-service-from-property", expectedGeneratedName: "app", @@ -375,6 +405,7 @@ func TestExtractServiceMetadata(t *testing.T) { "org.apache.catalina.startup.Bootstrap", "start", }, + lang: language.Java, expectedGeneratedName: "catalina", expectedAdditionalServices: []string{"app2", "custom"}, fs: &subUsmTestData, @@ -384,6 +415,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "./myservice.dll", }, + lang: language.DotNet, expectedGeneratedName: "myservice", }, { @@ -391,6 +423,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "-v", "--", "/app/lib/myservice.dll", }, + lang: language.DotNet, expectedGeneratedName: "myservice", }, { @@ -398,6 +431,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "run", "--project", "./projects/proj1/proj1.csproj", }, + lang: language.DotNet, expectedGeneratedName: "dotnet", }, { @@ -407,6 +441,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "serve", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -416,6 +451,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-ddatadog.service=foo", "swoole-server.php", }, + lang: language.PHP, expectedGeneratedName: "foo", }, { @@ -425,6 +461,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -434,6 +471,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -455,6 +493,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "howdy"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -462,6 +501,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually_tags", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_TAGS": "service:howdy"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -469,6 +509,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually_injection", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -481,6 +522,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--workers=2", "test:app", }, + lang: language.Python, expectedGeneratedName: "test", }, { @@ -581,7 +623,7 @@ func TestExtractServiceMetadata(t *testing.T) { if tt.fs != nil { fs = *tt.fs } - meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, make(DetectorContextMap)) + meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, tt.lang, make(DetectorContextMap)) if len(tt.expectedGeneratedName) == 0 && len(tt.expectedDDService) == 0 { require.False(t, ok) } else { From f7fa2e9b0ae071b4c82acbb2a894e7988641bc6b Mon Sep 17 00:00:00 2001 From: Guillaume Pagnoux Date: Fri, 6 Sep 2024 14:53:17 +0200 Subject: [PATCH 051/128] discovery: tests: convert cpu usage value to percentage in assertCPU (#29102) --- 
.../corechecks/servicediscovery/module/impl_linux_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 7997723188262..1ae02c62fee82 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -525,7 +525,9 @@ func assertCPU(t *testing.T, url string, pid int) { // Calling getServicesMap a second time us the CPU usage percentage since the last call, which should be close to gopsutil value. portMap := getServicesMap(t, url) assert.Contains(t, portMap, pid) - assert.InDelta(t, referenceValue, portMap[pid].CPUCores, 0.10) + // gopsutil reports a percentage, while we are reporting a float between 0 and $(nproc), + // so we convert our value to a percentage. + assert.InDelta(t, referenceValue, portMap[pid].CPUCores*100, 10) } func TestCommandLineSanitization(t *testing.T) { From 067da18dd6e3ae5deae510c4fe734a126f61c33d Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:21:29 +0200 Subject: [PATCH 052/128] Add another OTel test for the ingestor to replace old Argo e2e test (#28702) Co-authored-by: liustanley --- .gitlab/e2e/e2e.yml | 1 - .gitlab/e2e_k8s/e2e_k8s.yml | 12 - test/e2e/argo-workflows/otlp-workflow.yaml | 156 ------------ .../argo-workflows/templates/otlp-test.yaml | 229 ------------------ test/new-e2e/tests/otel/otel_test.go | 1 + .../tests/otel/otlp-ingest/pipelines_test.go | 148 +++++++++++ 6 files changed, 149 insertions(+), 398 deletions(-) delete mode 100644 test/e2e/argo-workflows/otlp-workflow.yaml delete mode 100644 test/e2e/argo-workflows/templates/otlp-test.yaml create mode 100644 test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index efbed3a2b7422..785ff989ce1d1 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -1,7 +1,6 @@ --- # e2e stage # Contains test jobs based on the new-e2e tests framework - .new_e2e_template: stage: e2e image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES diff --git a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml index 2c3faca866165..db52467098c4a 100644 --- a/.gitlab/e2e_k8s/e2e_k8s.yml +++ b/.gitlab/e2e_k8s/e2e_k8s.yml @@ -68,15 +68,3 @@ k8s-e2e-cspm-main: retry: 1 variables: ARGO_WORKFLOW: cspm - -k8s-e2e-otlp-dev: - extends: .k8s_e2e_template_dev - rules: !reference [.on_dev_branch_manual] - variables: - ARGO_WORKFLOW: otlp - -k8s-e2e-otlp-main: - extends: .k8s_e2e_template_main - rules: !reference [.on_main] - variables: - ARGO_WORKFLOW: otlp diff --git a/test/e2e/argo-workflows/otlp-workflow.yaml b/test/e2e/argo-workflows/otlp-workflow.yaml deleted file mode 100644 index 9320d2ae9ad7c..0000000000000 --- a/test/e2e/argo-workflows/otlp-workflow.yaml +++ /dev/null @@ -1,156 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler - arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: 
datadog-agent-volume - hostPath: - path: /host/datadog-agent - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-otlp-test - templateRef: - name: otlp-test - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: fake-dd-reset - templateRef: - name: fake-datadog - template: reset - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: "{{inputs.parameters.datadog-agent-image-tag}}" - - name: dd-url - value: "http://fake-datadog.{{workflow.namespace}}.svc.cluster.local" - - name: site - value: "" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-otlp - templateRef: - name: otlp-test - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: exit-handler - steps: - - - name: delete - template: delete - when: "{{workflow.status}} == Succeeded" - - - name: diagnose - template: diagnose - when: "{{workflow.status}} != Succeeded" - - - name: delete - steps: - - - name: stop-datadog-agent - templateRef: - name: datadog-agent - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-otlp-test - templateRef: - name: otlp-test - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-otlp-test - templateRef: - name: otlp-test - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/templates/otlp-test.yaml b/test/e2e/argo-workflows/templates/otlp-test.yaml deleted file mode 100644 index 9f9716ffd9fd2..0000000000000 --- a/test/e2e/argo-workflows/templates/otlp-test.yaml +++ /dev/null @@ -1,229 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: otlp-test -spec: - templates: - - name: create-sender-config - inputs: - parameters: - - name: namespace - 
resource: - action: apply - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: sender-config - namespace: {{inputs.parameters.namespace}} - data: - sender-config: |+ - receivers: - file: - path: /etc/data/metrics.data - loop: - enabled: true - period: 10s - exporters: - otlp: - endpoint: ${DD_AGENT_OTLP_ENDPOINT} - tls: - insecure: true - service: - pipelines: - metrics: - receivers: [file] - exporters: [otlp] - - name: create-metrics-data - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: metrics-data - namespace: {{inputs.parameters.namespace}} - data: - metrics-data: |+ - {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","asDouble":14}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","count":"42","sum":1541400,"bucketCounts":["14","0","14","0","0","14","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]} - {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","asDouble":27}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read 
bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","count":"81","sum":2972700,"bucketCounts":["27","0","27","0","0","27","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]} - - name: create-deployment - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: otlp-sender - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: otlp-sender - template: - metadata: - labels: - app: otlp-sender - spec: - containers: - - name: sender - image: datadog/docker-library:e2e-otlp-sender_latest - resources: - requests: - memory: "32Mi" - cpu: "100m" - limits: - memory: "32Mi" - cpu: "100m" - env: - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: DD_AGENT_OTLP_ENDPOINT - value: http://$(DD_AGENT_HOST):4317 - volumeMounts: - - name: "sender-config" - mountPath: "/etc/otel" - - name: "metrics-data" - mountPath: "/etc/data" - volumes: - - name: "sender-config" - configMap: - name: "sender-config" - items: - - key: sender-config - path: config.yaml - - name: "metrics-data" - configMap: - name: "metrics-data" - items: - - key: metrics-data - path: metrics.data - - name: create - inputs: - parameters: - - name: namespace - steps: - - - name: sender-config - template: create-sender-config - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: metrics-data - template: create-metrics-data - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: create-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: delete-deployment - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: otlp-sender - namespace: {{inputs.parameters.namespace}} - - name: delete-sender-config - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: sender-config - namespace: {{inputs.parameters.namespace}} - - name: delete-metrics-data - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: metrics-data - namespace: {{inputs.parameters.namespace}} - - name: delete - inputs: - parameters: - - name: namespace - steps: - - - name: deployment - template: delete-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: sender-config - template: delete-sender-config - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: metrics-data - template: delete-metrics-data - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: test - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, 
"fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - sleep(2000); - - // Gauges - var nb = db.series.find({metric: "an_important_metric"}).count(); - if (nb == 0) { - print("no 'an_important_metric' metric found"); - continue; - } - - print("All good"); - break; - } - - name: diagnose - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: alpine/k8s:1.27.1 - command: [sh] - source: | - set -euo pipefail - - kubectl --namespace {{inputs.parameters.namespace}} get pods -l app=otlp-sender -o custom-columns=name:metadata.name --no-headers | while read -r po; do - kubectl --namespace {{inputs.parameters.namespace}} logs $po -c sender || true - done diff --git a/test/new-e2e/tests/otel/otel_test.go b/test/new-e2e/tests/otel/otel_test.go index d67c79aefcd90..1a00bf90d9e51 100644 --- a/test/new-e2e/tests/otel/otel_test.go +++ b/test/new-e2e/tests/otel/otel_test.go @@ -38,6 +38,7 @@ type linuxTestSuite struct { var collectorConfig string func TestOTel(t *testing.T) { + t.Parallel() e2e.Run(t, &linuxTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(collectorConfig))))) } diff --git a/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go new file mode 100644 index 0000000000000..f15c74e858220 --- /dev/null +++ b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go @@ -0,0 +1,148 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package localkubernetes contains the provisioner for the local Kubernetes based environments + +package otel + +import ( + "context" + _ "embed" + "fmt" + "testing" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" + fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" +) + +type otelIngestTestSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +func TestOTelIngest(t *testing.T) { + values := ` +datadog: + otlp: + receiver: + protocols: + grpc: + enabled: true + logs: + enabled: true +` + t.Parallel() + e2e.Run(t, &otelIngestTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values))))) +} + +func (s *otelIngestTestSuite) TestOTLPTraces() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + numTraces := 10 + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "traces", []string{"--service", service, "--traces", fmt.Sprint(numTraces)}) + + s.T().Log("Waiting for traces") + s.EventuallyWithT(func(c *assert.CollectT) { + traces, err := s.Env().FakeIntake.Client().GetTraces() + assert.NoError(c, err) + assert.NotEmpty(c, traces) + trace := traces[0] + assert.Equal(c, "none", trace.Env) + assert.NotEmpty(c, trace.TracerPayloads) + tp := trace.TracerPayloads[0] + assert.NotEmpty(c, tp.Chunks) + assert.NotEmpty(c, tp.Chunks[0].Spans) + spans := tp.Chunks[0].Spans + for _, sp := range spans { + assert.Equal(c, service, sp.Service) + assert.Equal(c, "telemetrygen", sp.Meta["otel.library.name"]) + } + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) TestOTLPMetrics() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service) + numMetrics := 10 + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "metrics", []string{"--metrics", fmt.Sprint(numMetrics), "--otlp-attributes", serviceAttribute}) + + s.T().Log("Waiting for metrics") + s.EventuallyWithT(func(c *assert.CollectT) { + serviceTag := "service:" + service + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("gen", fakeintake.WithTags[*aggregator.MetricSeries]([]string{serviceTag})) + assert.NoError(c, err) + assert.NotEmpty(c, metrics) + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) TestOTLPLogs() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service) + numLogs := 10 + logBody := "telemetrygen log" + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "logs", []string{"--logs", fmt.Sprint(numLogs), "--otlp-attributes", serviceAttribute, "--body", logBody}) + + s.T().Log("Waiting for logs") + s.EventuallyWithT(func(c *assert.CollectT) { + logs, err := 
s.Env().FakeIntake.Client().FilterLogs(service) + assert.NoError(c, err) + assert.NotEmpty(c, logs) + for _, log := range logs { + assert.Contains(c, log.Message, logBody) + } + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) createTelemetrygenJob(ctx context.Context, telemetry string, options []string) { + var ttlSecondsAfterFinished int32 = 600 //nolint:revive // We want to see this is explicitly set to 0 + var backOffLimit int32 = 4 + + otlpEndpoint := fmt.Sprintf("%v:4317", s.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]) + jobSpec := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("telemetrygen-job-%v", telemetry), + Namespace: "datadog", + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &ttlSecondsAfterFinished, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "telemetrygen-job", + Image: "ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest", + Command: append([]string{"/telemetrygen", telemetry, "--otlp-endpoint", otlpEndpoint, "--otlp-insecure"}, options...), + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + BackoffLimit: &backOffLimit, + }, + } + + _, err := s.Env().KubernetesCluster.Client().BatchV1().Jobs("datadog").Create(ctx, jobSpec, metav1.CreateOptions{}) + assert.NoError(s.T(), err, "Could not properly start job") +} From 5bdcc44f1732c88bc397f547bd1105fd1cc31f3e Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 6 Sep 2024 16:40:02 +0200 Subject: [PATCH 053/128] feat(ssm): Write secret in a temporary file (#29083) Co-authored-by: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> --- .gitlab/choco_deploy/choco_deploy.yml | 6 +++++- .gitlab/deploy_packages/winget.yml | 6 +++++- .gitlab/integration_test/windows.yml | 6 +++++- tasks/unit_tests/linter_tests.py | 8 ++++---- tasks/winbuildscripts/unittests.ps1 | 22 ++++++++++++++++++---- tools/ci/aws_ssm_get_wrapper.ps1 | 12 ++++++++---- tools/ci/docker-login.ps1 | 14 ++++++++++++-- 7 files changed, 57 insertions(+), 17 deletions(-) diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index 86b63e251e362..9a8bae71f9755 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -10,7 +10,11 @@ publish_choco_7_x64: variables: ARCH: "x64" before_script: - - $chocolateyApiKey=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY_SSM_NAME") + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY_SSM_NAME" "$tmpfile") + - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $chocolateyApiKey=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' - $ErrorActionPreference = "Stop" diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index 0d0cd80b7b981..a29aa20668ced 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -10,7 +10,11 @@ publish_winget_7_x64: variables: ARCH: "x64" before_script: - - $wingetPat=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:WINGET_PAT_SSM_NAME) + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:WINGET_PAT_SSM_NAME" "$tmpfile") + - If 
($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $wingetPat=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' - $ErrorActionPreference = "Stop" diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index a68f86ab8a16a..b13c0a2d9f18e 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -7,7 +7,11 @@ needs: ["go_deps", "go_tools_deps"] tags: ["runner:windows-docker", "windowsversion:1809"] before_script: - - $vcpkgBlobSaSUrl=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:VCPKG_BLOB_SAS_URL_SSM_NAME) + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:VCPKG_BLOB_SAS_URL_SSM_NAME" "$tmpfile") + - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $vcpkgBlobSaSUrl=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py index 78ac5a323ee1d..b2d05a6f8d2b0 100644 --- a/tasks/unit_tests/linter_tests.py +++ b/tasks/unit_tests/linter_tests.py @@ -56,10 +56,10 @@ def test_with_wrapper_with_env(self): def test_multi_match_windows(self): with open(self.test_file, "w") as f: f.write( - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get_wrapper.ps1" "test.datadog-agent.datadog_api_key_org2)\n' - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get wrapper.ps1" "$Env:MISSING_UNDERSCORE)\n' - '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "bad.name")\n' - 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "$Env:TEST")\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get_wrapper.ps1" test.datadog-agent.datadog_api_key_org2 $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get wrapper.ps1" "$Env:MISSING_UNDERSCORE" $tmpfile)\n' + '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "bad.name" "$tmpfile")\n' + 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "$Env:TEST" $tmpfile)\n' ) matched = linter.list_get_parameter_calls(self.test_file) self.assertEqual(3, len(matched)) diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index 8323cdd0afe75..64e95dde53a07 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -60,9 +60,14 @@ $err = $LASTEXITCODE # Ignore upload failures $ErrorActionPreference = "Continue" +$tmpfile = [System.IO.Path]::GetTempFileName() # 1. Upload coverage reports to Codecov -$Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CODECOV_TOKEN_SSM_NAME) +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CODECOV_TOKEN_SSM_NAME" "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:CODECOV_TOKEN=$(cat "$tmpfile") & inv -e coverage.upload-to-codecov $Env:COVERAGE_CACHE_FLAG # 2. 
Upload junit files @@ -70,10 +75,19 @@ $Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CO Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object { Copy-Item -Path $_.FullName -Destination C:\mnt } -$Env:DATADOG_API_KEY=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:API_KEY_ORG2_SSM_NAME) -$Env:GITLAB_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:GITLAB_TOKEN_SSM_NAME) -& inv -e junit-upload --tgz-path $Env:JUNIT_TAR +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:API_KEY_ORG2_SSM_NAME" "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:DATADOG_API_KEY=$(cat "$tmpfile") +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:GITLAB_TOKEN_SSM_NAME" "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:GITLAB_TOKEN=$(cat "$tmpfile") +Remove-Item "$tmpfile" +& inv -e junit-upload --tgz-path $Env:JUNIT_TAR if($err -ne 0){ Write-Host -ForegroundColor Red "test failed $err" [Environment]::Exit($err) diff --git a/tools/ci/aws_ssm_get_wrapper.ps1 b/tools/ci/aws_ssm_get_wrapper.ps1 index 158301f4a08c6..0e345e80ff32a 100644 --- a/tools/ci/aws_ssm_get_wrapper.ps1 +++ b/tools/ci/aws_ssm_get_wrapper.ps1 @@ -1,5 +1,6 @@ param ( - [string]$parameterName + [string]$parameterName, + [string]$tempFile ) $retryCount = 0 @@ -9,15 +10,18 @@ while ($retryCount -lt $maxRetries) { $result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile.txt) $error = Get-Content awsErrorFile.txt if ($result) { - $result - break + "$result" | Out-File -FilePath "$tempFile" -Encoding ASCII + exit 0 } if ($error -match "Unable to locate credentials") { # See 5th row in https://docs.google.com/spreadsheets/d/1JvdN0N-RdNEeOJKmW_ByjBsr726E3ZocCKU8QoYchAc Write-Error "Permanent error: unable to locate AWS credentials, not retrying" - exit 1 + exit 42 } $retryCount++ Start-Sleep -Seconds ([math]::Pow(2, $retryCount)) } + +Write-Error "Failed to retrieve $parameterName after $maxRetries retries" +exit 1 diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index c0d4194bafa5a..81842426672b2 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -6,8 +6,18 @@ If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } # DockerHub login -$DOCKER_REGISTRY_LOGIN = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN_SSM_KEY") -$DOCKER_REGISTRY_PWD = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD_SSM_KEY") +$tmpfile = [System.IO.Path]::GetTempFileName() +& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN_SSM_KEY" "$tmpfile" +If ($lastExitCode -ne "0") { + throw "Previous command returned $lastExitCode" +} +$DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") +& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD_SSM_KEY" "$tmpfile" +If ($lastExitCode -ne "0") { + throw "Previous command returned $lastExitCode" +} +$DOCKER_REGISTRY_PWD = $(cat "$tmpfile") +Remove-Item "$tmpfile" docker login --username "${DOCKER_REGISTRY_LOGIN}" --password "${DOCKER_REGISTRY_PWD}" "docker.io" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" From f7ffdc03cce3c473cb39778ab22ac81dc9b639c1 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 6 Sep 2024 16:40:11 +0200 Subject: [PATCH 054/128] fix(check-for-changes): Sort by semver and filter tags (#29101) --- 
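
Note for reviewers, not part of the commit: the change below replaces "take the last line of git ls-remote output" with "take the semver maximum of the release tags". A minimal standalone sketch of that selection logic, assuming the semver PyPI package (2.x module-level compare, the same call the patch uses) and toy tag data rather than the repository's real helpers:

    from functools import cmp_to_key

    import semver

    # Output lines of "git ls-remote -t <repo> 7.56.*", in arbitrary order.
    lines = [
        "e1b8e91 refs/tags/7.56.0-rc.1",
        "2b8b710 refs/tags/7.56.0-rc.3",
        "7c6777b refs/tags/7.56.0-rc.11",
    ]

    def tag_version(line):
        # "refs/tags/7.56.0-rc.11" -> "7.56.0-rc.11"
        return line.split()[-1].split("/")[-1]

    # Taking the last line (or sorting lexically) would pick rc.3; comparing
    # with semver picks rc.11, which is what get_last_release_tag now relies on.
    latest = max(lines, key=lambda line: cmp_to_key(semver.compare)(tag_version(line)))
    print(latest)  # 7c6777b refs/tags/7.56.0-rc.11
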
tasks/libs/common/git.py | 28 +++++++-- tasks/release.py | 4 +- tasks/unit_tests/libs/common/git_tests.py | 71 +++++++++++++++++++++++ 3 files changed, 97 insertions(+), 6 deletions(-) diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py index c16a5c42e0cbe..c3c6781a98bb8 100644 --- a/tasks/libs/common/git.py +++ b/tasks/libs/common/git.py @@ -167,7 +167,12 @@ def get_last_commit(ctx, repo, branch): ) -def get_last_tag(ctx, repo, pattern): +def get_last_release_tag(ctx, repo, pattern): + import re + from functools import cmp_to_key + + import semver + tags = ctx.run( rf'git ls-remote -t https://github.com/DataDog/{repo} "{pattern}"', hide=True, @@ -180,9 +185,24 @@ def get_last_tag(ctx, repo, pattern): ), code=1, ) - last_tag = tags.splitlines()[-1] + + release_pattern = re.compile(r'.*7\.[0-9]+\.[0-9]+(-rc.*|-devel.*)?$') + tags_without_suffix = [ + line for line in tags.splitlines() if not line.endswith("^{}") and release_pattern.match(line) + ] + last_tag = max(tags_without_suffix, key=lambda x: cmp_to_key(semver.compare)(x.split('/')[-1])) last_tag_commit, last_tag_name = last_tag.split() - if last_tag_name.endswith("^{}"): - last_tag_name = last_tag_name.removesuffix("^{}") + tags_with_suffix = [line for line in tags.splitlines() if line.endswith("^{}") and release_pattern.match(line)] + if tags_with_suffix: + last_tag_with_suffix = max( + tags_with_suffix, key=lambda x: cmp_to_key(semver.compare)(x.split('/')[-1].removesuffix("^{}")) + ) + last_tag_commit_with_suffix, last_tag_name_with_suffix = last_tag_with_suffix.split() + if ( + semver.compare(last_tag_name_with_suffix.split('/')[-1].removesuffix("^{}"), last_tag_name.split("/")[-1]) + >= 0 + ): + last_tag_commit = last_tag_commit_with_suffix + last_tag_name = last_tag_name_with_suffix.removesuffix("^{}") last_tag_name = last_tag_name.removeprefix("refs/tags/") return last_tag_commit, last_tag_name diff --git a/tasks/release.py b/tasks/release.py index ac168432829c9..4132df03ec59d 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -29,7 +29,7 @@ clone, get_current_branch, get_last_commit, - get_last_tag, + get_last_release_tag, try_git_command, ) from tasks.libs.common.user_interactions import yes_no_question @@ -1008,7 +1008,7 @@ def check_for_changes(ctx, release_branch, warning_mode=False): changes = 'false' for repo_name, repo in repo_data.items(): head_commit = get_last_commit(ctx, repo_name, repo['branch']) - last_tag_commit, last_tag_name = get_last_tag(ctx, repo_name, next_version.tag_pattern()) + last_tag_commit, last_tag_name = get_last_release_tag(ctx, repo_name, next_version.tag_pattern()) if last_tag_commit != "" and last_tag_commit != head_commit: changes = 'true' print(f"{repo_name} has new commits since {last_tag_name}", file=sys.stderr) diff --git a/tasks/unit_tests/libs/common/git_tests.py b/tasks/unit_tests/libs/common/git_tests.py index dcfd884f76115..4a803a58b2758 100644 --- a/tasks/unit_tests/libs/common/git_tests.py +++ b/tasks/unit_tests/libs/common/git_tests.py @@ -1,11 +1,14 @@ import unittest from unittest.mock import MagicMock +from invoke import MockContext, Result + from tasks.libs.common.git import ( check_local_branch, check_uncommitted_changes, get_commit_sha, get_current_branch, + get_last_release_tag, get_staged_files, ) @@ -107,3 +110,71 @@ def test_get_commit_sha(self): f"git rev-parse {'--short ' if test['short'] else ''}HEAD", hide=True ) self.ctx_mock.run.reset_mock() + + +class TestGetLastTag(unittest.TestCase): + def test_ordered(self): + c = MockContext( + run={ + 'git 
ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.3") + + def test_non_ordered(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.11\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.11") + + def test_suffix_lower(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.3") + + def test_suffix_equal(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.3^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + commit, _ = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(commit, "7c6777bb7add533a789c69293b59e3261711d330") + + def test_suffix_greater(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.4^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.4") + + def test_only_release_tags(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.57.*"': Result( + 
"43638bd55a74fd6ec51264cc7b3b1003d0b1c7ac\trefs/tags/7.57.0-dbm-mongo-1.5\ne01bcf3d12e6d6742b1fa8296882938c6dba9922\trefs/tags/7.57.0-devel\n6a5ad7fda590c7b8ba7036bca70dc8a0872e7afe\trefs/tags/7.57.0-devel^{}\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.1\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.2\n6a91fcca0ade9f77f08cd98d923a8d9ec18d7e8f\trefs/tags/7.57.0-installer-0.5.0-rc.3\n7e8ffc3de15f0486e6cb2184fa59f02da6ecfab9\trefs/tags/7.57.0-rc.1\nfa72fd12e3483a2d5957ea71fe01a8b1af376424\trefs/tags/7.57.0-rc.1^{}\n22587b746d6a0876cb7477b9b335e8573bdc3ac5\trefs/tags/7.57.0-rc.2\nd6c151a36487c3b54145ae9bf200f6c356bb9348\trefs/tags/7.57.0-rc.2^{}\n948ed4dd8c8cdf0aae467997086bb2229d4f1916\trefs/tags/7.57.0-rc.3\n259ed086a45960006e110622332cc8a39f9c6bb9\trefs/tags/7.57.0-rc.3^{}\na249f4607e5da894715a3e011dba8046b46678ed\trefs/tags/7.57.0-rc.4\n51a3b405a244348aec711d38e5810a6d88075b77\trefs/tags/7.57.0-rc.4^{}\n06519be707d6f24fb8265cde5a50cf0a66d5cb02\trefs/tags/7.57.0-rc.5\n7f43a5180446290f498742e68d8b28a75da04188\trefs/tags/7.57.0-rc.5^{}\n6bb640559e7626131290c63dab3959ba806c9886\trefs/tags/7.57.0-rc.6\nc5ed1f8b4734d31e94c2a83f307dbcb2b5a1faac\trefs/tags/7.57.0-rc.6^{}\n260697e624bb1d92ad306fdc301aab9b2975a627\trefs/tags/7.57.0-rc.7\n48617a0f56747e33b75d3dcf570bc2237726dc0e\trefs/tags/7.57.0-rc.7^{}\n5e11e104ff99b40b01ff2cfa702c0e4a465f98de\trefs/tags/7.57.1-beta-ndm-rdns-enrichment\n91c7c85d7c8fbb94421a90b273aea75630617eef\trefs/tags/7.57.1-beta-ndm-rdns-enrichment^{}\n3ad359da2894fa3de6e265c56dea8fabdb128454\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2\n86683ad80578912014cc947dcf247ba020532403\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2^{}" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.57.*") + self.assertEqual(name, "7.57.0-rc.7") From 3908ea2983930af0a538ad79390ab0e87cedc141 Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 6 Sep 2024 16:55:15 +0200 Subject: [PATCH 055/128] Remove all source `/root/.bashrc` using the honor entrypoint FF (#29007) --- .gitlab-ci.yml | 1 + .gitlab/.pre/cancel-prev-pipelines.yml | 1 - .gitlab/.pre/test_gitlab_configuration.yml | 1 - .gitlab/binary_build/cluster_agent.yml | 2 -- .gitlab/binary_build/cluster_agent_cloudfoundry.yml | 1 - .gitlab/binary_build/cws_instrumentation.yml | 2 -- .gitlab/binary_build/linux.yml | 6 ------ .gitlab/binary_build/serverless.yml | 1 - .gitlab/common/container_publish_job_templates.yml | 1 - .gitlab/deploy_containers/deploy_containers_a7.yml | 4 ---- .../deploy_cws_instrumentation.yml | 1 - .gitlab/deploy_dca/deploy_dca.yml | 1 - .gitlab/deps_fetch/deps_fetch.yml | 2 -- .../install_script_testing/install_script_testing.yml | 1 - .gitlab/integration_test/dogstatsd.yml | 1 - .gitlab/internal_image_deploy/internal_image_deploy.yml | 3 --- .../internal_kubernetes_deploy.yml | 1 - .../internal_kubernetes_deploy/rc_kubernetes_deploy.yml | 1 - .gitlab/kitchen_deploy/kitchen_deploy.yml | 9 --------- .gitlab/notify/notify.yml | 3 --- .gitlab/package_build/dmg.yml | 1 - .gitlab/package_build/heroku.yml | 1 - .gitlab/package_build/installer.yml | 5 ----- .gitlab/package_build/linux.yml | 3 --- .gitlab/packaging/deb.yml | 3 --- .gitlab/packaging/oci.yml | 1 - .gitlab/packaging/rpm.yml | 2 -- .gitlab/pkg_metrics/pkg_metrics.yml | 5 ----- .gitlab/post_rc_build/post_rc_tasks.yml | 1 - .gitlab/setup/setup.yml | 2 -- .gitlab/source_test/ebpf.yml | 3 --- .gitlab/source_test/go_generate_check.yml | 1 - 
.gitlab/source_test/golang_deps_diff.yml | 6 +++--- .gitlab/source_test/linux.yml | 4 ++-- .gitlab/source_test/macos.yml | 2 -- .gitlab/source_test/notify.yml | 1 - .gitlab/source_test/slack.yml | 1 - .gitlab/source_test/technical_linters.yml | 3 --- .gitlab/source_test/tooling_unit_tests.yml | 2 -- .gitlab/trigger_release/trigger_release.yml | 1 - tools/ci/aws_ssm_get_wrapper.sh | 3 --- tools/ci/junit_upload.sh | 1 - 42 files changed, 6 insertions(+), 89 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a87ad8e0c6eaa..3c16b1839c04c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -259,6 +259,7 @@ variables: RESTORE_CACHE_ATTEMPTS: 2 # Feature flags FF_SCRIPT_SECTIONS: 1 # Prevent multiline scripts log collapsing, see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3392 + FF_KUBERNETES_HONOR_ENTRYPOINT: true # Honor the entrypoint in the Docker image when running Kubernetes jobs # # Condition mixins for simplification of rules diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml index afbdde2dbd51b..2bb87278b70ec 100644 --- a/.gitlab/.pre/cancel-prev-pipelines.yml +++ b/.gitlab/.pre/cancel-prev-pipelines.yml @@ -14,6 +14,5 @@ cancel-prev-pipelines: when: never - when: on_success script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - inv pipeline.auto-cancel-previous-pipelines diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml index 529cc1c8d2956..300d4f1a0999d 100644 --- a/.gitlab/.pre/test_gitlab_configuration.yml +++ b/.gitlab/.pre/test_gitlab_configuration.yml @@ -5,7 +5,6 @@ test_gitlab_configuration: rules: - !reference [.on_gitlab_changes] script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) - inv -e linter.gitlab-ci - inv -e linter.job-change-path diff --git a/.gitlab/binary_build/cluster_agent.yml b/.gitlab/binary_build/cluster_agent.yml index ea2eac369c8ac..b20ff9d10331e 100644 --- a/.gitlab/binary_build/cluster_agent.yml +++ b/.gitlab/binary_build/cluster_agent.yml @@ -23,7 +23,6 @@ cluster_agent-build_amd64: variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] cluster_agent-build_arm64: @@ -36,5 +35,4 @@ cluster_agent-build_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] diff --git a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml index 501c0d8dd3d38..873a471a3b85a 100644 --- a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml +++ b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml @@ -15,7 +15,6 @@ cluster_agent_cloudfoundry-build_amd64: ARCH: amd64 KUBERNETES_CPU_REQUEST: 4 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version diff --git a/.gitlab/binary_build/cws_instrumentation.yml b/.gitlab/binary_build/cws_instrumentation.yml index b6d517df2f52c..787be00f814cb 100644 --- a/.gitlab/binary_build/cws_instrumentation.yml +++ b/.gitlab/binary_build/cws_instrumentation.yml @@ -17,7 +17,6 @@ cws_instrumentation-build_amd64: variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] cws_instrumentation-build_arm64: @@ -30,5 +29,4 @@ cws_instrumentation-build_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - 
!reference [.retrieve_linux_go_deps] diff --git a/.gitlab/binary_build/linux.yml b/.gitlab/binary_build/linux.yml index d8644d63a2c9c..50333acc2321e 100644 --- a/.gitlab/binary_build/linux.yml +++ b/.gitlab/binary_build/linux.yml @@ -10,7 +10,6 @@ build_dogstatsd_static-binary_x64: variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -28,7 +27,6 @@ build_dogstatsd_static-binary_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -44,7 +42,6 @@ build_dogstatsd-binary_x64: tags: ["arch:amd64"] needs: ["lint_linux-x64", "go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -62,7 +59,6 @@ build_dogstatsd-binary_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -83,7 +79,6 @@ build_iot_agent-binary_x64: before_script: - !reference [.retrieve_linux_go_deps] script: - - source /root/.bashrc - inv check-go-version - inv -e agent.build --flavor iot --major-version 7 - $S3_CP_CMD $CI_PROJECT_DIR/$AGENT_BINARIES_DIR/agent $S3_ARTIFACTS_URI/iot/agent @@ -100,7 +95,6 @@ build_iot_agent-binary_arm64: before_script: - !reference [.retrieve_linux_go_deps] script: - - source /root/.bashrc - inv check-go-version - inv -e agent.build --flavor iot --major-version 7 diff --git a/.gitlab/binary_build/serverless.yml b/.gitlab/binary_build/serverless.yml index 8861528211fab..fa626581965be 100644 --- a/.gitlab/binary_build/serverless.yml +++ b/.gitlab/binary_build/serverless.yml @@ -5,7 +5,6 @@ - !reference [.except_mergequeue] - when: on_success before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml index ed119645aa883..76b44efeba6fc 100644 --- a/.gitlab/common/container_publish_job_templates.yml +++ b/.gitlab/common/container_publish_job_templates.yml @@ -13,7 +13,6 @@ IMG_VARIABLES: "" IMG_SIGNING: "" script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - | if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml index b63cd6cc74972..5d3fdac92be9a 100644 --- a/.gitlab/deploy_containers/deploy_containers_a7.yml +++ b/.gitlab/deploy_containers/deploy_containers_a7.yml @@ -25,7 +25,6 @@ include: stage: deploy_containers dependencies: [] before_script: - - source /root/.bashrc - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi - export IMG_BASE_SRC="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64" @@ -66,7 +65,6 @@ deploy_containers-dogstatsd: !reference [.manual_on_deploy_auto_on_rc] 
dependencies: [] before_script: - - source /root/.bashrc - export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" - export IMG_SOURCES="${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64" - export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}" @@ -98,8 +96,6 @@ deploy_containers-ot: IMG_REGISTRIES: public VERSION: 7 dependencies: [] - before_script: - - source /root/.bashrc parallel: matrix: - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64 diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml index 47b3566f320a1..2d7301d70bca4 100644 --- a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml +++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml @@ -11,7 +11,6 @@ include: stage: deploy_cws_instrumentation dependencies: [] before_script: - - source /root/.bashrc - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi - if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi - export IMG_BASE_SRC="${SRC_CWS_INSTRUMENTATION}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" diff --git a/.gitlab/deploy_dca/deploy_dca.yml b/.gitlab/deploy_dca/deploy_dca.yml index 5db81a7297552..5065744f8e315 100644 --- a/.gitlab/deploy_dca/deploy_dca.yml +++ b/.gitlab/deploy_dca/deploy_dca.yml @@ -15,7 +15,6 @@ include: - job: "docker_build_cluster_agent_arm64" artifacts: false before_script: - - source /root/.bashrc - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi - if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi - export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" diff --git a/.gitlab/deps_fetch/deps_fetch.yml b/.gitlab/deps_fetch/deps_fetch.yml index 617a272ab2cb3..4fde8af699088 100644 --- a/.gitlab/deps_fetch/deps_fetch.yml +++ b/.gitlab/deps_fetch/deps_fetch.yml @@ -40,7 +40,6 @@ go_deps: # If the cache already contains the dependencies, don't redownload them # but still provide the artifact that's expected for the other jobs to run - if [ -f modcache.tar.xz ]; then exit 0; fi - - source /root/.bashrc - inv -e deps --verbose - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz . artifacts: @@ -60,7 +59,6 @@ go_tools_deps: extends: .cache script: - if [ -f modcache_tools.tar.xz ]; then exit 0; fi - - source /root/.bashrc - inv -e download-tools - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz . 
artifacts: diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml index 45f93afe64309..cafe094eaa5f2 100644 --- a/.gitlab/install_script_testing/install_script_testing.yml +++ b/.gitlab/install_script_testing/install_script_testing.yml @@ -4,7 +4,6 @@ test_install_script: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - set +x - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET diff --git a/.gitlab/integration_test/dogstatsd.yml b/.gitlab/integration_test/dogstatsd.yml index 5e5484df1024f..6cce87ab2a04c 100644 --- a/.gitlab/integration_test/dogstatsd.yml +++ b/.gitlab/integration_test/dogstatsd.yml @@ -11,7 +11,6 @@ dogstatsd_x64_size_test: tags: ["arch:amd64"] needs: ["build_dogstatsd_static-binary_x64"] before_script: - - source /root/.bashrc - mkdir -p $STATIC_BINARIES_DIR - $S3_CP_CMD $S3_ARTIFACTS_URI/static/dogstatsd.amd64 $STATIC_BINARIES_DIR/dogstatsd script: diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index f4cb34de1588a..bdf209e744f58 100644 --- a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -22,7 +22,6 @@ docker_trigger_internal: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | @@ -115,7 +114,6 @@ docker_trigger_cluster_agent_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | @@ -162,7 +160,6 @@ docker_trigger_cws_instrumentation_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 0ecef941fe652..87bae387f610c 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -36,7 +36,6 @@ internal_kubernetes_deploy_experimental: EXPLICIT_WORKFLOWS: "//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish" BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" script: - - source /root/.bashrc - export 
GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml index d3069ea0b320f..44b48d2829115 100644 --- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @@ -22,7 +22,6 @@ rc_kubernetes_deploy: EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc" AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME script: - - source /root/.bashrc - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 4184c08c53f8b..1ee5a881c07d1 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -40,7 +40,6 @@ variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_deb_testing-a6_x64: @@ -85,7 +84,6 @@ deploy_deb_testing-a6_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_deb_testing-a7_x64: @@ -139,7 +137,6 @@ deploy_deb_testing-a7_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_rpm_testing-a6_x64: @@ -177,7 +174,6 @@ deploy_rpm_testing-a6_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_rpm_testing-a7_x64: @@ -226,7 +222,6 @@ deploy_suse_rpm_testing_x64-a6: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -252,7 +247,6 @@ deploy_suse_rpm_testing_x64-a7: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -271,7 +265,6 @@ deploy_suse_rpm_testing_arm64-a7: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -288,7 +281,6 @@ deploy_windows_testing-a6: tags: ["arch:amd64"] needs: ["lint_windows-x64", "windows_msi_x64-a6"] before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR script: - $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-6.*.msi" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A6 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 @@ -304,7 +296,6 @@ deploy_windows_testing-a7: needs: ["lint_windows-x64", "windows_msi_and_bosh_zip_x64-a7", "windows-installer-amd64"] before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR script: - $S3_CP_CMD diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index 7c6e8aa580159..efa437a64cdde 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -53,7 +53,6 @@ send_pipeline_stats: when: always dependencies: [] script: - - source /root/.bashrc - export 
GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) - invoke -e notify.send-stats @@ -79,7 +78,6 @@ notify_github: dependencies: [] allow_failure: true script: - - source /root/.bashrc - !reference [.install_pr_commenter] - messagefile="$(mktemp)" - echo "Use this command from [test-infra-definitions](https://github.com/DataDog/test-infra-definitions) to manually test this PR changes on a VM:" >> "$messagefile" @@ -127,7 +125,6 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above .failure_summary_setup: - - source /root/.bashrc - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN_SSM_NAME) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) diff --git a/.gitlab/package_build/dmg.yml b/.gitlab/package_build/dmg.yml index cf07e07415e7d..02d0b830cc910 100644 --- a/.gitlab/package_build/dmg.yml +++ b/.gitlab/package_build/dmg.yml @@ -34,5 +34,4 @@ agent_dmg-x64-a7: PYTHON_RUNTIMES: "3" timeout: 6h before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 diff --git a/.gitlab/package_build/heroku.yml b/.gitlab/package_build/heroku.yml index 337bc54932f56..c036313447986 100644 --- a/.gitlab/package_build/heroku.yml +++ b/.gitlab/package_build/heroku.yml @@ -14,7 +14,6 @@ "generate_minimized_btfs_x64", ] script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to build for $RELEASE_VERSION" diff --git a/.gitlab/package_build/installer.yml b/.gitlab/package_build/installer.yml index ac335677c8bad..98232f8910e08 100644 --- a/.gitlab/package_build/installer.yml +++ b/.gitlab/package_build/installer.yml @@ -55,7 +55,6 @@ datadog-agent-oci-x64-a7: PACKAGE_ARCH: amd64 DESTINATION_OCI: "datadog-agent-7-remote-updater-amd64.tar.xz" before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 datadog-agent-oci-arm64-a7: @@ -79,7 +78,6 @@ datadog-agent-oci-arm64-a7: PACKAGE_ARCH: arm64 DESTINATION_OCI: "datadog-agent-7-remote-updater-arm64.tar.xz" before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 # @@ -87,7 +85,6 @@ datadog-agent-oci-arm64-a7: # .installer_build_common: script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to build for $RELEASE_VERSION" @@ -146,7 +143,6 @@ installer-amd64-oci: variables: DESTINATION_FILE: "datadog-updater_7-amd64-oci.tar.xz" before_script: - - source /root/.bashrc - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" @@ -155,7 +151,6 @@ installer-arm64-oci: variables: DESTINATION_FILE: "datadog-updater_7-arm64-oci.tar.xz" before_script: - - source /root/.bashrc - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" diff --git a/.gitlab/package_build/linux.yml b/.gitlab/package_build/linux.yml index db66682e4d356..04aafb966b0e7 100644 --- a/.gitlab/package_build/linux.yml +++ b/.gitlab/package_build/linux.yml @@ -4,7 +4,6 @@ - when: on_success stage: package_build 
script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] @@ -110,7 +109,6 @@ datadog-ot-agent-7-arm64: extends: .agent_build_common needs: ["go_mod_tidy_check", "go_deps"] script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] @@ -149,7 +147,6 @@ iot-agent-armhf: - when: on_success stage: package_build script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] diff --git a/.gitlab/packaging/deb.yml b/.gitlab/packaging/deb.yml index cd136706ff1e6..08a02809ed244 100644 --- a/.gitlab/packaging/deb.yml +++ b/.gitlab/packaging/deb.yml @@ -2,7 +2,6 @@ .package_deb_common: stage: packaging script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] @@ -88,7 +87,6 @@ agent_deb-arm64-a7: .package_ot_deb_common: extends: [.package_deb_common] script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] @@ -143,7 +141,6 @@ installer_deb-arm64: - when: on_success stage: packaging script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] diff --git a/.gitlab/packaging/oci.yml b/.gitlab/packaging/oci.yml index 4598fa8050336..b2dcd8eaf740a 100644 --- a/.gitlab/packaging/oci.yml +++ b/.gitlab/packaging/oci.yml @@ -6,7 +6,6 @@ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] before_script: - - source /root/.bashrc - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1 - export INSTALL_DIR=/opt/datadog-packages/${OCI_PRODUCT}/${PACKAGE_VERSION} variables: diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index f337dc8124924..104e66cfa40c1 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -5,7 +5,6 @@ - !reference [.except_mergequeue] - when: on_success before_script: - - source /root/.bashrc script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] @@ -136,7 +135,6 @@ installer_suse_rpm-arm64: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 8ff246b8c2667..9ea6639e0780e 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -56,8 +56,6 @@ send_pkg_size: - job: iot_agent_suse-x64 optional: true script: - - source /root/.bashrc - # Get API key to send metrics - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) @@ -107,12 +105,9 @@ send_pkg_size: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - - ls -l $OMNIBUS_PACKAGE_DIR - if 
[[ "${ARCH}" == "amd64" ]]; then ls -l $OMNIBUS_PACKAGE_DIR_SUSE; fi - - source /root/.bashrc - export failures=0 - export last_stable=$(inv release.get-release-json-value "last_stable::${MAJOR_VERSION}") # Get stable packages from S3 buckets, send new package sizes & compare stable and new package sizes diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml index 00efc95005fa5..e00f0e8599bdb 100644 --- a/.gitlab/post_rc_build/post_rc_tasks.yml +++ b/.gitlab/post_rc_build/post_rc_tasks.yml @@ -11,7 +11,6 @@ update_rc_build_links: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN_SSM_NAME) - export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com - python3 -m pip install -r tasks/requirements_release_tasks.txt diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml index 3ef2bdd4dfeb0..3d800618dde97 100644 --- a/.gitlab/setup/setup.yml +++ b/.gitlab/setup/setup.yml @@ -4,7 +4,6 @@ setup_agent_version: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - inv -e agent.version --cache-version - $S3_CP_CMD $CI_PROJECT_DIR/agent-version.cache $S3_ARTIFACTS_URI/agent-version.cache needs: [] @@ -17,7 +16,6 @@ github_rate_limit_info: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client # Send stats for app 1 - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME) diff --git a/.gitlab/source_test/ebpf.yml b/.gitlab/source_test/ebpf.yml index 11dfc05354333..012406f1b85aa 100644 --- a/.gitlab/source_test/ebpf.yml +++ b/.gitlab/source_test/ebpf.yml @@ -16,7 +16,6 @@ before_script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc script: - inv -e install-tools - inv -e system-probe.object-files @@ -53,7 +52,6 @@ tests_ebpf_arm64: paths: - $CI_PROJECT_DIR/kmt-deps before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - inv -e install-tools @@ -87,7 +85,6 @@ prepare_sysprobe_ebpf_functional_tests_x64: - $CI_PROJECT_DIR/kmt-deps - $DD_AGENT_TESTING_DIR/site-cookbooks/dd-security-agent-check/files before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - inv -e install-tools diff --git a/.gitlab/source_test/go_generate_check.yml b/.gitlab/source_test/go_generate_check.yml index ad4af5b2ee47c..51d3293a17370 100644 --- a/.gitlab/source_test/go_generate_check.yml +++ b/.gitlab/source_test/go_generate_check.yml @@ -10,7 +10,6 @@ security_go_generate_check: before_script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc - pip3 install wheel - pip3 install -r docs/cloud-workload-security/scripts/requirements-docs.txt - inv -e install-tools diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml index 5a01ac2d74a13..7fee28e886445 100644 --- a/.gitlab/source_test/golang_deps_diff.yml +++ 
b/.gitlab/source_test/golang_deps_diff.yml @@ -12,7 +12,6 @@ golang_deps_diff: variables: KUBERNETES_CPU_REQUEST: 4 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics @@ -32,6 +31,9 @@ golang_deps_commenter: - !reference [.except_deploy] - when: on_success needs: ["golang_deps_diff"] + variables: + # Not using the entrypoint script for the pr-commenter image + FF_KUBERNETES_HONOR_ENTRYPOINT: false script: # ignore error message about no PR, because it happens for dev branches without PRs - echo "${CI_COMMIT_REF_NAME}" - | @@ -59,7 +61,6 @@ golang_deps_send_count_metrics: - when: on_success needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics @@ -74,7 +75,6 @@ golang_deps_test: - when: on_success needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv -e go-deps.test-list diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index c4d57130833a5..529ee8a9aeda1 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -65,7 +65,6 @@ script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc && conda activate ddpy3 - inv -e rtloader.make --install-prefix=$CI_PROJECT_DIR/dev --python-runtimes "3" - inv -e rtloader.install - inv -e install-tools @@ -252,7 +251,6 @@ go_mod_tidy_check: extends: .linux_x64 needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv -e check-mod-tidy @@ -282,4 +280,6 @@ new-e2e-unit-tests: KUBERNETES_MEMORY_REQUEST: 12Gi KUBERNETES_MEMORY_LIMIT: 16Gi KUBERNETES_CPU_REQUEST: 6 + # Not using the entrypoint script for the e2e runner image + FF_KUBERNETES_HONOR_ENTRYPOINT: false timeout: 10m diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index 76adde53972bf..fe93fb8860e5b 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -11,7 +11,6 @@ tests_macos: variables: PYTHON_RUNTIMES: "3" script: - - source /root/.bashrc - !reference [.setup_macos_github_app] - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -) @@ -43,7 +42,6 @@ lint_macos: PYTHON_RUNTIMES: "3" timeout: 6h script: - - source /root/.bashrc - !reference [.setup_macos_github_app] - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . 
- export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -) diff --git a/.gitlab/source_test/notify.yml b/.gitlab/source_test/notify.yml index aee23f82cfae7..097a12f564aff 100644 --- a/.gitlab/source_test/notify.yml +++ b/.gitlab/source_test/notify.yml @@ -7,7 +7,6 @@ unit_tests_notify: - !reference [.except_disable_unit_tests] - when: always script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - !reference [.setup_agent_github_app] - inv notify.unit-tests --pipeline-id $CI_PIPELINE_ID --pipeline-url $CI_PIPELINE_URL --branch-name $CI_COMMIT_REF_NAME diff --git a/.gitlab/source_test/slack.yml b/.gitlab/source_test/slack.yml index 920d88bed12ff..5d357a98d2446 100644 --- a/.gitlab/source_test/slack.yml +++ b/.gitlab/source_test/slack.yml @@ -9,6 +9,5 @@ slack_teams_channels_check: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - python3 -m pip install codeowners -c tasks/libs/requirements-notifications.txt - inv -e notify.check-teams diff --git a/.gitlab/source_test/technical_linters.yml b/.gitlab/source_test/technical_linters.yml index 5bf8f5fe25518..c7759eda331bc 100644 --- a/.gitlab/source_test/technical_linters.yml +++ b/.gitlab/source_test/technical_linters.yml @@ -4,7 +4,6 @@ lint_python: tags: ["arch:amd64"] needs: [] script: - - source /root/.bashrc - inv -e linter.python lint_update_go: @@ -13,7 +12,6 @@ lint_update_go: tags: ["arch:amd64"] needs: [] script: - - source /root/.bashrc - inv -e linter.update-go validate_modules: @@ -22,6 +20,5 @@ validate_modules: tags: ["arch:amd64"] needs: [] script: - - source /root/.bashrc - inv -e modules.validate - inv -e modules.validate-used-by-otel diff --git a/.gitlab/source_test/tooling_unit_tests.yml b/.gitlab/source_test/tooling_unit_tests.yml index 25419b084d120..e7a7ab4e1c133 100644 --- a/.gitlab/source_test/tooling_unit_tests.yml +++ b/.gitlab/source_test/tooling_unit_tests.yml @@ -8,7 +8,6 @@ invoke_unit_tests: rules: - !reference [.on_invoke_tasks_changes] script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - inv -e invoke-unit-tests.run @@ -20,7 +19,6 @@ kitchen_invoke_unit_tests: rules: - !reference [.on_kitchen_invoke_tasks_changes] script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - pushd test/kitchen - inv -e kitchen.invoke-unit-tests diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index 11b204e7c7a9c..5084298b2adc0 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -18,7 +18,6 @@ script: # agent-release-management creates pipeline for both Agent 6 and Agent 7 # when triggered with major version 7 - - source /root/.bashrc - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1 - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) - 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main" diff --git a/tools/ci/aws_ssm_get_wrapper.sh b/tools/ci/aws_ssm_get_wrapper.sh index 2fb8298145d9b..d7b8406a7c458 100755 --- a/tools/ci/aws_ssm_get_wrapper.sh +++ b/tools/ci/aws_ssm_get_wrapper.sh @@ -4,9 +4,6 @@ retry_count=0 max_retries=10 parameter_name="$1" -# shellcheck disable=SC1091 -source /root/.bashrc > /dev/null 2>&1 - set +x while [[ $retry_count -lt $max_retries ]]; do diff --git a/tools/ci/junit_upload.sh 
b/tools/ci/junit_upload.sh index 8d13895f0e912..68f839b21f0c6 100755 --- a/tools/ci/junit_upload.sh +++ b/tools/ci/junit_upload.sh @@ -1,6 +1,5 @@ #!/bin/bash # shellcheck source=/dev/null -source /root/.bashrc # junit file name can differ in kitchen or macos context junit_files="junit-*.tgz" if [[ -n "$1" ]]; then From f54fcda7c5c047aee8edfb85c63e2346a241a52c Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 6 Sep 2024 16:58:21 +0200 Subject: [PATCH 056/128] Bump dogstatsd max size (#29109) --- tasks/dogstatsd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/dogstatsd.py b/tasks/dogstatsd.py index 874eb4974341d..6373af271bf0b 100644 --- a/tasks/dogstatsd.py +++ b/tasks/dogstatsd.py @@ -18,7 +18,7 @@ # constants DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd") STATIC_BIN_PATH = os.path.join(".", "bin", "static") -MAX_BINARY_SIZE = 42 * 1024 +MAX_BINARY_SIZE = 44 * 1024 DOGSTATSD_TAG = "datadog/dogstatsd:master" From 66f36bbf52a27ce23fee8ee4ba1dd63ee636b84a Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Fri, 6 Sep 2024 17:22:23 +0200 Subject: [PATCH 057/128] discovery: Ignore ephemeral UDP ports (#29106) --- .../servicediscovery/module/impl_linux.go | 28 +++++++++++++---- .../module/impl_linux_test.go | 30 ++++++++++++------- 2 files changed, 43 insertions(+), 15 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 2500353ccc052..d0678edecf891 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -28,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" + "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -230,7 +231,7 @@ func parseNetIPSocketLine(fields []string, expectedState uint64) (uint64, uint16 // newNetIPSocket reads the content of the provided file and returns a map of socket inodes to ports. // Based on newNetIPSocket() in net_ip_socket.go from github.com/prometheus/procfs -func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error) { +func newNetIPSocket(file string, expectedState uint64, shouldIgnore func(uint16) bool) (map[uint64]uint16, error) { f, err := os.Open(file) if err != nil { return nil, err @@ -248,6 +249,11 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error if err != nil { continue } + + if shouldIgnore != nil && shouldIgnore(port) { + continue + } + netIPSocket[inode] = port } if err := s.Err(); err != nil { @@ -260,19 +266,31 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error // protocols for the provided namespace. Based on snapshotBoundSockets() in // pkg/security/security_profile/activity_tree/process_node_snapshot.go. func getNsInfo(pid int) (*namespaceInfo, error) { - tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen) + // Don't ignore ephemeral ports on TCP, unlike on UDP (see below). 
+ var noIgnore func(uint16) bool + tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen, noIgnore) if err != nil { log.Debugf("couldn't snapshot TCP sockets: %v", err) } - udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen) + udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen, + func(port uint16) bool { + // As in NPM (see initializePortBind() in + // pkg/network/tracer/connection): Ignore ephemeral port binds on + // UDP as they are more likely to be from clients calling bind with + // port 0. + return network.IsPortInEphemeralRange(network.AFINET, network.UDP, port) == network.EphemeralTrue + }) if err != nil { log.Debugf("couldn't snapshot UDP sockets: %v", err) } - tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen) + tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen, noIgnore) if err != nil { log.Debugf("couldn't snapshot TCP6 sockets: %v", err) } - udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen) + udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen, + func(port uint16) bool { + return network.IsPortInEphemeralRange(network.AFINET6, network.UDP, port) == network.EphemeralTrue + }) if err != nil { log.Debugf("couldn't snapshot UDP6 sockets: %v", err) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 1ae02c62fee82..8020f41750457 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -44,6 +44,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" + "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" protocolUtils "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/tls/nodejs" @@ -135,8 +136,8 @@ func startTCPClient(t *testing.T, proto string, server *net.TCPAddr) (*os.File, return f, client.LocalAddr().(*net.TCPAddr) } -func startUDPServer(t *testing.T, proto string) (*os.File, *net.UDPAddr) { - lnPacket, err := net.ListenPacket(proto, "") +func startUDPServer(t *testing.T, proto string, address string) (*os.File, *net.UDPAddr) { + lnPacket, err := net.ListenPacket(proto, address) require.NoError(t, err) t.Cleanup(func() { _ = lnPacket.Close() }) @@ -200,7 +201,7 @@ func TestBasic(t *testing.T) { } var startUDP = func(proto string) { - f, server := startUDPServer(t, proto) + f, server := startUDPServer(t, proto, ":8083") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) expectedPorts[cmd.Process.Pid] = server.Port @@ -251,13 +252,17 @@ func TestPorts(t *testing.T) { } var startUDP = func(proto string) { - serverf, server := startUDPServer(t, proto) + serverf, server := startUDPServer(t, proto, ":8083") t.Cleanup(func() { _ = serverf.Close() }) clientf, client := startUDPClient(t, proto, server) t.Cleanup(func() { clientf.Close() }) expectedPorts = append(expectedPorts, uint16(server.Port)) unexpectedPorts = append(unexpectedPorts, 
uint16(client.Port)) + + ephemeralf, ephemeral := startUDPServer(t, proto, "") + t.Cleanup(func() { _ = ephemeralf.Close() }) + unexpectedPorts = append(unexpectedPorts, uint16(ephemeral.Port)) } startTCP("tcp4") @@ -916,12 +921,17 @@ func BenchmarkOldGetSockets(b *testing.B) { } // addSockets adds only listening sockets to a map to be used for later looksups. -func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, state uint64) { +func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, + family network.ConnectionFamily, ctype network.ConnectionType, state uint64) { for _, sock := range sockets { if sock.St != state { continue } - sockMap[sock.Inode] = socketInfo{port: uint16(sock.LocalPort)} + port := uint16(sock.LocalPort) + if state == udpListen && network.IsPortInEphemeralRange(family, ctype, port) == network.EphemeralTrue { + continue + } + sockMap[sock.Inode] = socketInfo{port: port} } } @@ -939,10 +949,10 @@ func getNsInfoOld(pid int) (*namespaceInfo, error) { listeningSockets := make(map[uint64]socketInfo) - addSockets(listeningSockets, TCP, tcpListen) - addSockets(listeningSockets, TCP6, tcpListen) - addSockets(listeningSockets, UDP, udpListen) - addSockets(listeningSockets, UDP6, udpListen) + addSockets(listeningSockets, TCP, network.AFINET, network.TCP, tcpListen) + addSockets(listeningSockets, TCP6, network.AFINET6, network.TCP, tcpListen) + addSockets(listeningSockets, UDP, network.AFINET, network.UDP, udpListen) + addSockets(listeningSockets, UDP6, network.AFINET6, network.UDP, udpListen) return &namespaceInfo{ listeningSockets: listeningSockets, From 017279b47d44cbeae09d3c9dcc760a2751929bcb Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 6 Sep 2024 17:25:25 +0200 Subject: [PATCH 058/128] Add dogstatsd check size in MQ (#29112) --- .gitlab/binary_build/linux.yml | 3 --- .gitlab/integration_test/dogstatsd.yml | 3 --- 2 files changed, 6 deletions(-) diff --git a/.gitlab/binary_build/linux.yml b/.gitlab/binary_build/linux.yml index 50333acc2321e..56ae035b566e4 100644 --- a/.gitlab/binary_build/linux.yml +++ b/.gitlab/binary_build/linux.yml @@ -1,9 +1,6 @@ --- build_dogstatsd_static-binary_x64: stage: binary_build - rules: - - !reference [.except_mergequeue] - - when: on_success image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: ["lint_linux-x64", "go_deps"] diff --git a/.gitlab/integration_test/dogstatsd.yml b/.gitlab/integration_test/dogstatsd.yml index 6cce87ab2a04c..ab1862d716dae 100644 --- a/.gitlab/integration_test/dogstatsd.yml +++ b/.gitlab/integration_test/dogstatsd.yml @@ -4,9 +4,6 @@ dogstatsd_x64_size_test: stage: integration_test - rules: - - !reference [.except_mergequeue] - - when: on_success image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: ["build_dogstatsd_static-binary_x64"] From 7f9eae20418557606d5b18652a1703b1dfba206c Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 6 Sep 2024 18:24:45 +0200 Subject: [PATCH 059/128] [CWS] Fix TestWriteFileEventWithCreate (#29079) --- pkg/security/tests/file_windows_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/security/tests/file_windows_test.go b/pkg/security/tests/file_windows_test.go index 
491fa3ea6cab4..15b7c57e7b0a3 100644 --- a/pkg/security/tests/file_windows_test.go +++ b/pkg/security/tests/file_windows_test.go @@ -223,7 +223,7 @@ func TestWriteFileEventWithCreate(t *testing.T) { } return f.Close() }, test.validateFileEvent(t, noWrapperType, func(event *model.Event, rule *rules.Rule) { - assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", event, "write.file.name file didn't match") + assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", "write.file.name file didn't match") })) }) } From 93fd55814de014e54de8ff50c4105267a79b88ae Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 6 Sep 2024 19:03:20 +0200 Subject: [PATCH 060/128] [CWS] Disable enforcement on remote configuration loss (#29093) --- pkg/config/remote/client/client.go | 71 ++++++++++++++++++++++++---- pkg/fleet/daemon/daemon_test.go | 7 ++- pkg/fleet/daemon/remote_config.go | 4 +- pkg/security/probe/probe.go | 6 +++ pkg/security/probe/probe_ebpf.go | 5 ++ pkg/security/probe/probe_ebpfless.go | 5 ++ pkg/security/probe/probe_others.go | 3 ++ pkg/security/probe/probe_windows.go | 5 ++ pkg/security/probe/process_killer.go | 17 +++++++ pkg/security/rconfig/policies.go | 22 +++++++-- pkg/security/rules/engine.go | 11 ++++- 11 files changed, 135 insertions(+), 21 deletions(-) diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go index a97cecda5f899..124ce3de40f59 100644 --- a/pkg/config/remote/client/client.go +++ b/pkg/config/remote/client/client.go @@ -46,6 +46,12 @@ type ConfigFetcher interface { ClientGetConfigs(context.Context, *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) } +// Listener defines the interface of a remote config listener +type Listener interface { + OnUpdate(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) + OnStateChange(bool) +} + // fetchConfigs defines the function that an agent client uses to get config updates type fetchConfigs func(context.Context, *pbgo.ClientGetConfigsRequest, ...grpc.CallOption) (*pbgo.ClientGetConfigsResponse, error) @@ -69,7 +75,7 @@ type Client struct { state *state.Repository - listeners map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) + listeners map[string][]Listener // Elements that can be changed during the execution of listeners // They are atomics so that they don't have to share the top-level mutex @@ -160,9 +166,6 @@ func (g *agentGRPCConfigFetcher) ClientGetConfigs(ctx context.Context, request * return g.fetchConfigs(ctx, request) } -// Handler is a function that is called when a config update is received. -type Handler func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) - // NewClient creates a new client func NewClient(updater ConfigFetcher, opts ...func(o *Options)) (*Client, error) { return newClient(updater, opts...) @@ -289,7 +292,7 @@ func newClient(cf ConfigFetcher, opts ...func(opts *Options)) (*Client, error) { installerState: installerState, state: repository, backoffPolicy: backoffPolicy, - listeners: make(map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))), + listeners: make(map[string][]Listener), configFetcher: cf, }, nil } @@ -324,8 +327,8 @@ func (c *Client) SetAgentName(agentName string) { } } -// Subscribe subscribes to config updates of a product. 
-func (c *Client) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { +// SubscribeAll subscribes to all events (config updates, state changed, ...) +func (c *Client) SubscribeAll(product string, listener Listener) { c.m.Lock() defer c.m.Unlock() @@ -341,7 +344,12 @@ func (c *Client) Subscribe(product string, fn func(update map[string]state.RawCo c.products = append(c.products, product) } - c.listeners[product] = append(c.listeners[product], fn) + c.listeners[product] = append(c.listeners[product], listener) +} + +// Subscribe subscribes to config updates of a product. +func (c *Client) Subscribe(product string, cb func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { + c.SubscribeAll(product, NewUpdateListener(cb)) } // GetConfigs returns the current configs applied of a product. @@ -428,11 +436,29 @@ func (c *Client) pollLoop() { log.Infof("retrying the first update of remote-config state (%v)", err) } } else { + c.m.Lock() + for _, productListeners := range c.listeners { + for _, listener := range productListeners { + listener.OnStateChange(false) + } + } + c.m.Unlock() + c.lastUpdateError = err c.backoffErrorCount = c.backoffPolicy.IncError(c.backoffErrorCount) log.Errorf("could not update remote-config state: %v", c.lastUpdateError) } } else { + if c.lastUpdateError != nil { + c.m.Lock() + for _, productListeners := range c.listeners { + for _, listener := range productListeners { + listener.OnStateChange(true) + } + } + c.m.Unlock() + } + c.lastUpdateError = nil successfulFirstRun = true c.backoffErrorCount = c.backoffPolicy.DecError(c.backoffErrorCount) @@ -470,7 +496,7 @@ func (c *Client) update() error { for product, productListeners := range c.listeners { if containsProduct(changedProducts, product) { for _, listener := range productListeners { - listener(c.state.GetConfigs(product), c.state.UpdateApplyStatus) + listener.OnUpdate(c.state.GetConfigs(product), c.state.UpdateApplyStatus) } } } @@ -594,6 +620,33 @@ func (c *Client) newUpdateRequest() (*pbgo.ClientGetConfigsRequest, error) { return req, nil } +type listener struct { + onUpdate func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) + onStateChange func(bool) +} + +func (l *listener) OnUpdate(configs map[string]state.RawConfig, cb func(cfgPath string, status state.ApplyStatus)) { + if l.onUpdate != nil { + l.onUpdate(configs, cb) + } +} + +func (l *listener) OnStateChange(state bool) { + if l.onStateChange != nil { + l.onStateChange(state) + } +} + +// NewUpdateListener creates a remote config listener from a update callback +func NewUpdateListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) Listener { + return &listener{onUpdate: onUpdate} +} + +// NewListener creates a remote config listener from a couple of update and state change callbacks +func NewListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)), onStateChange func(bool)) Listener { + return &listener{onUpdate: onUpdate, onStateChange: onStateChange} +} + var ( idSize = 21 idAlphabet = []rune("_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go index 4e3d0941553ee..1aa78c95d4b4e 100644 --- a/pkg/fleet/daemon/daemon_test.go +++ b/pkg/fleet/daemon/daemon_test.go @@ -20,7 +20,6 @@ import ( 
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/fleet/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" @@ -104,13 +103,13 @@ func (m *testPackageManager) UninstrumentAPMInjector(ctx context.Context, method type testRemoteConfigClient struct { sync.Mutex t *testing.T - listeners map[string][]client.Handler + listeners map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) } func newTestRemoteConfigClient(t *testing.T) *testRemoteConfigClient { return &testRemoteConfigClient{ t: t, - listeners: make(map[string][]client.Handler), + listeners: make(map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))), } } @@ -123,7 +122,7 @@ func (c *testRemoteConfigClient) Close() { func (c *testRemoteConfigClient) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { c.Lock() defer c.Unlock() - c.listeners[product] = append(c.listeners[product], client.Handler(fn)) + c.listeners[product] = append(c.listeners[product], fn) } func (c *testRemoteConfigClient) SetInstallerState(_ []*pbgo.PackageState) { diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index 9509185899ccd..d68b8489ad62e 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -100,7 +100,7 @@ func (c *catalog) getPackage(pkg string, version string, arch string, platform s type handleCatalogUpdate func(catalog catalog) error -func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) client.Handler { +func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { var catalogOnce sync.Once return func(catalogConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { var mergedCatalog catalog @@ -192,7 +192,7 @@ type taskWithVersionParams struct { type handleRemoteAPIRequest func(request remoteAPIRequest) error -func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) client.Handler { +func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { var executedRequests = make(map[string]struct{}) return func(requestConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { requests := map[string]remoteAPIRequest{} diff --git a/pkg/security/probe/probe.go b/pkg/security/probe/probe.go index 549f56d5c2a0c..00e5d62d53b7a 100644 --- a/pkg/security/probe/probe.go +++ b/pkg/security/probe/probe.go @@ -55,6 +55,7 @@ type PlatformProbe interface { AddDiscarderPushedCallback(_ DiscarderPushedCallback) GetEventTags(_ string) []string GetProfileManager() interface{} + EnableEnforcement(bool) } // EventHandler represents a handler for events sent by the probe that needs access to all the fields in the SECL model @@ -397,3 +398,8 @@ func (p *Probe) IsActivityDumpTagRulesEnabled() bool { func (p *Probe) IsSecurityProfileEnabled() bool { return p.Config.RuntimeSecurity.SecurityProfileEnabled } + +// EnableEnforcement sets the enforcement mode +func (p *Probe) EnableEnforcement(state bool) { + p.PlatformProbe.EnableEnforcement(state) +} diff --git a/pkg/security/probe/probe_ebpf.go 
b/pkg/security/probe/probe_ebpf.go index 86be48d856c76..61d25cdfd16d2 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1628,6 +1628,11 @@ func (p *EBPFProbe) DumpProcessCache(withArgs bool) (string, error) { return p.Resolvers.ProcessResolver.ToDot(withArgs) } +// EnableEnforcement sets the enforcement mode +func (p *EBPFProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // NewEBPFProbe instantiates a new runtime security agent probe func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EBPFProbe, error) { nerpc, err := erpc.NewERPC() diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 492abb05f42b6..e11e94969ab29 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -633,6 +633,11 @@ func (p *EBPFLessProbe) zeroEvent() *model.Event { return p.event } +// EnableEnforcement sets the enforcement mode +func (p *EBPFLessProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // NewEBPFLessProbe returns a new eBPF less probe func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry telemetry.Component) (*EBPFLessProbe, error) { opts.normalize() diff --git a/pkg/security/probe/probe_others.go b/pkg/security/probe/probe_others.go index 4e37810f70d5a..099b646d32bd4 100644 --- a/pkg/security/probe/probe_others.go +++ b/pkg/security/probe/probe_others.go @@ -107,3 +107,6 @@ func (p *Probe) RefreshUserCache(_ string) error { // HandleActions executes the actions of a triggered rule func (p *Probe) HandleActions(_ *rules.Rule, _ eval.Event) {} + +// EnableEnforcement sets the enforcement mode +func (p *Probe) EnableEnforcement(_ bool) {} diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 0bff0416519fe..00998e6200b55 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -1386,6 +1386,11 @@ func (p *Probe) Origin() string { return "" } +// EnableEnforcement sets the enforcement mode +func (p *WindowsProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // NewProbe instantiates a new runtime security agent probe func NewProbe(config *config.Config, opts Opts, _ workloadmeta.Component, telemetry telemetry.Component) (*Probe, error) { opts.normalize() diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 9af3dd54c1975..b2937e45560a9 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -36,6 +36,7 @@ type ProcessKiller struct { cfg *config.Config + enabled bool pendingReports []*KillActionReport binariesExcluded []*eval.Glob sourceAllowed []string @@ -48,6 +49,7 @@ type ProcessKiller struct { func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { p := &ProcessKiller{ cfg: cfg, + enabled: true, ruleDisarmers: make(map[rules.RuleID]*killDisarmer), sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, } @@ -66,6 +68,14 @@ func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { return p, nil } +// SetState sets the state - enabled or disabled - for the process killer +func (p *ProcessKiller) SetState(enabled bool) { + p.Lock() + defer p.Unlock() + + p.enabled = enabled +} + // AddPendingReports add a pending reports func (p *ProcessKiller) AddPendingReports(report *KillActionReport) { p.Lock() @@ -110,6 +120,13 @@ func (p 
*ProcessKiller) HandleProcessExited(event *model.Event) { } func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) bool { + p.Lock() + if !p.enabled { + p.Unlock() + return false + } + p.Unlock() + for i, pid := range pids { if pid <= 1 || pid == utils.Getpid() { return false diff --git a/pkg/security/rconfig/policies.go b/pkg/security/rconfig/policies.go index c9bed099acaff..3d8576481e07a 100644 --- a/pkg/security/rconfig/policies.go +++ b/pkg/security/rconfig/policies.go @@ -41,12 +41,13 @@ type RCPolicyProvider struct { lastCustoms map[string]state.RawConfig debouncer *debouncer.Debouncer dumpPolicies bool + setEnforcementCb func(bool) } var _ rules.PolicyProvider = (*RCPolicyProvider)(nil) // NewRCPolicyProvider returns a new Remote Config based policy provider -func NewRCPolicyProvider(dumpPolicies bool) (*RCPolicyProvider, error) { +func NewRCPolicyProvider(dumpPolicies bool, setEnforcementCallback func(bool)) (*RCPolicyProvider, error) { agentVersion, err := utils.GetAgentSemverVersion() if err != nil { return nil, fmt.Errorf("failed to parse agent version: %w", err) @@ -68,8 +69,9 @@ func NewRCPolicyProvider(dumpPolicies bool) (*RCPolicyProvider, error) { } r := &RCPolicyProvider{ - client: c, - dumpPolicies: dumpPolicies, + client: c, + dumpPolicies: dumpPolicies, + setEnforcementCb: setEnforcementCallback, } r.debouncer = debouncer.New(debounceDelay, r.onNewPoliciesReady) @@ -82,12 +84,18 @@ func (r *RCPolicyProvider) Start() { r.debouncer.Start() - r.client.Subscribe(state.ProductCWSDD, r.rcDefaultsUpdateCallback) - r.client.Subscribe(state.ProductCWSCustom, r.rcCustomsUpdateCallback) + r.client.SubscribeAll(state.ProductCWSDD, client.NewListener(r.rcDefaultsUpdateCallback, r.rcStateChanged)) + r.client.SubscribeAll(state.ProductCWSCustom, client.NewListener(r.rcCustomsUpdateCallback, r.rcStateChanged)) r.client.Start() } +func (r *RCPolicyProvider) rcStateChanged(state bool) { + if r.setEnforcementCb != nil { + r.setEnforcementCb(state) + } +} + func (r *RCPolicyProvider) rcDefaultsUpdateCallback(configs map[string]state.RawConfig, _ func(string, state.ApplyStatus)) { r.Lock() if len(r.lastDefaults) == 0 && len(configs) == 0 { @@ -191,6 +199,10 @@ func (r *RCPolicyProvider) onNewPoliciesReady() { defer r.RUnlock() if r.onNewPoliciesReadyCb != nil { + if r.setEnforcementCb != nil { + r.setEnforcementCb(true) + } + r.onNewPoliciesReadyCb() } } diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 67e48fa8845b2..03ee84af37c56 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -360,7 +360,7 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { // add remote config as config provider if enabled. 
if e.config.RemoteConfigurationEnabled { - rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies) + rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies, e.rcStateCallback) if err != nil { seclog.Errorf("will be unable to load remote policies: %s", err) } else { @@ -378,6 +378,15 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { return policyProviders } +func (e *RuleEngine) rcStateCallback(state bool) { + if state { + seclog.Infof("Connection to remote config established") + } else { + seclog.Infof("Connection to remote config lost") + } + e.probe.EnableEnforcement(state) +} + // EventDiscarderFound is called by the ruleset when a new discarder discovered func (e *RuleEngine) EventDiscarderFound(rs *rules.RuleSet, event eval.Event, field eval.Field, eventType eval.EventType) { if e.reloading.Load() { From 378550466bd1b33466c564006b3bf9afd148c9bd Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Fri, 6 Sep 2024 19:04:25 +0200 Subject: [PATCH 061/128] [CWS] Add enforcement metrics (#29111) --- pkg/security/metrics/metrics.go | 15 ++++ pkg/security/probe/probe_ebpf.go | 2 + pkg/security/probe/probe_ebpfless.go | 1 + pkg/security/probe/probe_windows.go | 3 + pkg/security/probe/process_killer.go | 110 +++++++++++++++++++++++---- 5 files changed, 118 insertions(+), 13 deletions(-) diff --git a/pkg/security/metrics/metrics.go b/pkg/security/metrics/metrics.go index 9213c0c4ff739..481f0d16bfaea 100644 --- a/pkg/security/metrics/metrics.go +++ b/pkg/security/metrics/metrics.go @@ -331,6 +331,21 @@ var ( // Tags: - MetricRulesStatus = newRuntimeMetric(".rules_status") + // Enforcement metrics + + // MetricEnforcementKillActionPerformed is the name of the metric used to report that a kill action was performed + // Tags: rule_id + MetricEnforcementKillActionPerformed = newRuntimeMetric(".enforcement.kill_action_performed") + // MetricEnforcementProcessKilled is the name of the metric used to report the number of processes killed + // Tags: rule_id + MetricEnforcementProcessKilled = newRuntimeMetric(".enforcement.process_killed") + // MetricEnforcementRuleDisarmed is the name of the metric used to report that a rule was disarmed + // Tags: rule_id, disarmer_type ('executable', 'container') + MetricEnforcementRuleDisarmed = newRuntimeMetric(".enforcement.rule_disarmed") + // MetricEnforcementRuleRearmed is the name of the metric used to report that a rule was rearmed + // Tags: rule_id + MetricEnforcementRuleRearmed = newRuntimeMetric(".enforcement.rule_rearmed") + // Others // MetricSelfTest is the name of the metric used to report that a self test was performed diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 61d25cdfd16d2..ae9a8c70350cc 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -467,6 +467,8 @@ func (p *EBPFProbe) DispatchEvent(event *model.Event) { func (p *EBPFProbe) SendStats() error { p.Resolvers.TCResolver.SendTCProgramsStats(p.statsdClient) + p.processKiller.SendStats(p.statsdClient) + if err := p.profileManagers.SendStats(); err != nil { return err } diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index e11e94969ab29..b2ee591f4becf 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -558,6 +558,7 @@ func (p *EBPFLessProbe) NewModel() *model.Model { // SendStats send the stats func (p *EBPFLessProbe) SendStats() error 
{ + p.processKiller.SendStats(p.statsdClient) return nil } diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 00998e6200b55..d1989f88b9f0c 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -1111,6 +1111,9 @@ func (p *WindowsProbe) SendStats() error { if err != nil { return err } + + p.processKiller.SendStats(p.statsdClient) + return nil } diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index b2937e45560a9..7c9a54d785028 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -16,7 +16,10 @@ import ( "github.com/jellydator/ttlcache/v3" + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -27,7 +30,7 @@ import ( const ( defaultKillActionFlushDelay = 2 * time.Second - dismarmerCacheFlushInterval = 5 * time.Second + disarmerCacheFlushInterval = 5 * time.Second ) // ProcessKiller defines a process killer structure @@ -43,6 +46,14 @@ type ProcessKiller struct { ruleDisarmersLock sync.Mutex ruleDisarmers map[rules.RuleID]*killDisarmer + + perRuleStatsLock sync.Mutex + perRuleStats map[rules.RuleID]*processKillerStats +} + +type processKillerStats struct { + actionPerformed int64 + processesKilled int64 } // NewProcessKiller returns a new ProcessKiller @@ -52,6 +63,7 @@ func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { enabled: true, ruleDisarmers: make(map[rules.RuleID]*killDisarmer), sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, + perRuleStats: make(map[rules.RuleID]*processKillerStats), } binaries := append(binariesExcluded, cfg.RuntimeSecurity.EnforcementBinaryExcluded...) 
@@ -160,18 +172,18 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R rsConfig := p.cfg.RuntimeSecurity if rsConfig.EnforcementDisarmerContainerEnabled || rsConfig.EnforcementDisarmerExecutableEnabled { - var dismarmer *killDisarmer + var disarmer *killDisarmer p.ruleDisarmersLock.Lock() - if dismarmer = p.ruleDisarmers[rule.ID]; dismarmer == nil { - dismarmer = newKillDisarmer(rsConfig, rule.ID) - p.ruleDisarmers[rule.ID] = dismarmer + if disarmer = p.ruleDisarmers[rule.ID]; disarmer == nil { + disarmer = newKillDisarmer(rsConfig, rule.ID) + p.ruleDisarmers[rule.ID] = disarmer } p.ruleDisarmersLock.Unlock() if rsConfig.EnforcementDisarmerContainerEnabled { if containerID := ev.FieldHandlers.ResolveContainerID(ev, ev.ContainerContext); containerID != "" { - if !dismarmer.allow(dismarmer.containerCache, containerID, func() { - seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, dismarmer.containerCache.capacity, rsConfig.EnforcementDisarmerContainerPeriod) + if !disarmer.allow(disarmer.containerCache, containerDisarmer, containerID, func() { + seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, disarmer.containerCache.capacity, rsConfig.EnforcementDisarmerContainerPeriod) }) { seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) return @@ -181,8 +193,8 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R if rsConfig.EnforcementDisarmerExecutableEnabled { executable := entry.Process.FileEvent.PathnameStr - if !dismarmer.allow(dismarmer.executableCache, executable, func() { - seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, dismarmer.executableCache.capacity, rsConfig.EnforcementDisarmerExecutablePeriod) + if !disarmer.allow(disarmer.executableCache, executableDisarmer, executable, func() { + seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, disarmer.executableCache.capacity, rsConfig.EnforcementDisarmerExecutablePeriod) }) { seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) return @@ -210,15 +222,28 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R sig := model.SignalConstants[signal] + var processesKilled int64 killedAt := time.Now() for _, pid := range pids { log.Debugf("requesting signal %s to be sent to %d", signal, pid) if err := killFnc(uint32(pid), uint32(sig)); err != nil { seclog.Debugf("failed to kill process %d: %s", pid, err) + } else { + processesKilled++ } } + p.perRuleStatsLock.Lock() + var stats *processKillerStats + if stats = p.perRuleStats[rule.ID]; stats == nil { + stats = &processKillerStats{} + p.perRuleStats[rule.ID] = stats + } + stats.actionPerformed++ + stats.processesKilled += processesKilled + p.perRuleStatsLock.Unlock() + p.Lock() defer p.Unlock() @@ -237,11 +262,57 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R // Reset resets the disarmer state func (p *ProcessKiller) Reset() { + p.perRuleStatsLock.Lock() + clear(p.perRuleStats) + p.perRuleStatsLock.Unlock() p.ruleDisarmersLock.Lock() clear(p.ruleDisarmers) p.ruleDisarmersLock.Unlock() } +// SendStats sends runtime security enforcement statistics to Datadog +func (p *ProcessKiller) 
SendStats(statsd statsd.ClientInterface) { + p.perRuleStatsLock.Lock() + for ruleID, stats := range p.perRuleStats { + ruleIDTag := []string{ + "rule_id:" + string(ruleID), + } + + if stats.actionPerformed > 0 { + _ = statsd.Count(metrics.MetricEnforcementKillActionPerformed, stats.actionPerformed, ruleIDTag, 1) + stats.actionPerformed = 0 + } + + if stats.processesKilled > 0 { + _ = statsd.Count(metrics.MetricEnforcementProcessKilled, stats.processesKilled, ruleIDTag, 1) + stats.processesKilled = 0 + } + } + p.perRuleStatsLock.Unlock() + + p.ruleDisarmersLock.Lock() + for ruleID, disarmer := range p.ruleDisarmers { + ruleIDTag := []string{ + "rule_id:" + string(ruleID), + } + + disarmer.Lock() + for disarmerType, count := range disarmer.disarmedCount { + if count > 0 { + tags := append([]string{"disarmer_type:" + string(disarmerType)}, ruleIDTag...) + _ = statsd.Count(metrics.MetricEnforcementRuleDisarmed, count, tags, 1) + disarmer.disarmedCount[disarmerType] = 0 + } + } + if disarmer.rearmedCount > 0 { + _ = statsd.Count(metrics.MetricEnforcementRuleRearmed, disarmer.rearmedCount, ruleIDTag, 1) + disarmer.rearmedCount = 0 + } + disarmer.Unlock() + } + p.ruleDisarmersLock.Unlock() +} + // Start starts the go rountine responsible for flushing the disarmer caches func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { if !p.cfg.RuntimeSecurity.EnforcementEnabled || (!p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled && !p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled) { @@ -251,7 +322,7 @@ func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { wg.Add(1) go func() { defer wg.Done() - ticker := time.NewTicker(dismarmerCacheFlushInterval) + ticker := time.NewTicker(disarmerCacheFlushInterval) defer ticker.Stop() for { select { @@ -268,6 +339,7 @@ func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { } if disarmer.disarmed && cLength == 0 && eLength == 0 { disarmer.disarmed = false + disarmer.rearmedCount++ seclog.Infof("kill action of rule `%s` has been re-armed", disarmer.ruleID) } disarmer.Unlock() @@ -280,12 +352,22 @@ func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { }() } +type disarmerType string + +const ( + containerDisarmer = disarmerType("container") + executableDisarmer = disarmerType("executable") +) + type killDisarmer struct { sync.Mutex disarmed bool ruleID rules.RuleID containerCache *disarmerCache[string, bool] executableCache *disarmerCache[string, bool] + // stats + disarmedCount map[disarmerType]int64 + rearmedCount int64 } type disarmerCache[K comparable, V any] struct { @@ -315,8 +397,9 @@ func (c *disarmerCache[K, V]) flush() int { func newKillDisarmer(cfg *config.RuntimeSecurityConfig, ruleID rules.RuleID) *killDisarmer { kd := &killDisarmer{ - disarmed: false, - ruleID: ruleID, + disarmed: false, + ruleID: ruleID, + disarmedCount: make(map[disarmerType]int64), } if cfg.EnforcementDisarmerContainerEnabled { @@ -330,7 +413,7 @@ func newKillDisarmer(cfg *config.RuntimeSecurityConfig, ruleID rules.RuleID) *ki return kd } -func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], key string, onDisarm func()) bool { +func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], typ disarmerType, key string, onDisarm func()) bool { kd.Lock() defer kd.Unlock() @@ -350,6 +433,7 @@ func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], key string, on cache.Set(key, true, ttlcache.DefaultTTL) if alreadyAtCapacity && !kd.disarmed { kd.disarmed = true + 
kd.disarmedCount[typ]++ onDisarm() } } From 897e31f7577e89a0afc9b6b85ac6670d5b63cfcd Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 17:13:36 +0000 Subject: [PATCH 062/128] [test-infra-definitions][automated] Bump test-infra-definitions to e0fb9ce404ac63e084447f8512d94653c940a063 (#29104) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 6e55837cfb724..f3612f51eea60 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: c9ee795ec752 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: e0fb9ce404ac diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 5c978c93c1ca3..41578f1a45b19 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -32,7 +32,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752 + github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac github.com/aws/aws-sdk-go-v2 v1.30.4 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 30a72e12faf32..a944c13f2e155 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -14,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmi github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752 h1:J+KnqV0jYvXvcN1LzRiRxYJo/nHGYsnBQ22VXIdAJD8= -github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752/go.mod h1:QEQPOdzBcxZly/1KtAPFgF1R7Tp98FajB06gZ75E+/U= +github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac h1:epY6p93MEB4W6ViKzxPhaFP8iWBybU6G2jWr4h+oHEQ= +github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac/go.mod h1:QEQPOdzBcxZly/1KtAPFgF1R7Tp98FajB06gZ75E+/U= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From 2a712cb9239863f6d8b5011108f8447e70d159a8 Mon Sep 17 00:00:00 2001 From: Dustin Long Date: Fri, 6 Sep 2024 13:21:11 -0400 Subject: [PATCH 063/128] UnmarshalKey implemented using pure reflection (#28821) --- pkg/config/structure/unmarshal.go | 440 +++++++++++++++++++++++++ 
pkg/config/structure/unmarshal_test.go | 154 +++++++++ 2 files changed, 594 insertions(+) create mode 100644 pkg/config/structure/unmarshal.go create mode 100644 pkg/config/structure/unmarshal_test.go diff --git a/pkg/config/structure/unmarshal.go b/pkg/config/structure/unmarshal.go new file mode 100644 index 0000000000000..97420043e2268 --- /dev/null +++ b/pkg/config/structure/unmarshal.go @@ -0,0 +1,440 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package structure defines a helper to retrieve structured data from the config +package structure + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// UnmarshalKey retrieves data from the config at the given key and deserializes it +// to be stored on the target struct. It is implemented entirely using reflection, and +// does not depend upon details of the data model of the config. +// Target struct can use of struct tag of "yaml", "json", or "mapstructure" to rename fields +func UnmarshalKey(cfg model.Reader, key string, target interface{}) error { + source, err := newNode(reflect.ValueOf(cfg.Get(key))) + if err != nil { + return err + } + outValue := reflect.ValueOf(target) + if outValue.Kind() == reflect.Pointer { + outValue = reflect.Indirect(outValue) + } + switch outValue.Kind() { + case reflect.Map: + return copyMap(outValue, source) + case reflect.Struct: + return copyStruct(outValue, source) + case reflect.Slice: + if arr, ok := source.(arrayNode); ok { + return copyList(outValue, arr) + } + return fmt.Errorf("can not UnmarshalKey to a slice from a non-list source") + default: + return fmt.Errorf("can only UnmarshalKey to struct, map, or slice, got %v", outValue.Kind()) + } +} + +var errNotFound = fmt.Errorf("not found") + +// leafNode represents a leaf with a scalar value + +type leafNode interface { + GetBool() (bool, error) + GetInt() (int, error) + GetFloat() (float64, error) + GetString() (string, error) +} + +type leafNodeImpl struct { + // val must be a scalar kind + val reflect.Value +} + +var _ leafNode = (*leafNodeImpl)(nil) +var _ node = (*leafNodeImpl)(nil) + +// arrayNode represents a node with an ordered array of children + +type arrayNode interface { + Size() int + Index(int) (node, error) +} + +type arrayNodeImpl struct { + // val must be a Slice with Len() and Index() + val reflect.Value +} + +var _ arrayNode = (*arrayNodeImpl)(nil) +var _ node = (*arrayNodeImpl)(nil) + +// node represents an arbitrary node of the tree + +type node interface { + GetChild(string) (node, error) + ChildrenKeys() ([]string, error) +} + +type innerNodeImpl struct { + // val must be a struct + val reflect.Value +} + +type innerMapNodeImpl struct { + // val must be a map[string]interface{} + val reflect.Value +} + +var _ node = (*innerNodeImpl)(nil) +var _ node = (*innerMapNodeImpl)(nil) + +// all nodes, leaf, inner, and array nodes, each act as nodes +func newNode(v reflect.Value) (node, error) { + if v.Kind() == reflect.Struct { + return &innerNodeImpl{val: v}, nil + } else if v.Kind() == reflect.Map { + return &innerMapNodeImpl{val: v}, nil + } else if v.Kind() == reflect.Slice { + return &arrayNodeImpl{val: v}, nil + } else if isScalarKind(v) { + return &leafNodeImpl{val: v}, nil + } + return nil, fmt.Errorf("could not create node from: %v of 
type %T and kind %v", v, v, v.Kind()) +} + +// GetChild returns the child node at the given key, or an error if not found +func (n *innerNodeImpl) GetChild(key string) (node, error) { + findex := findFieldMatch(n.val, key) + if findex == -1 { + return nil, errNotFound + } + inner := n.val.Field(findex) + if inner.Kind() == reflect.Interface { + inner = inner.Elem() + } + return newNode(inner) +} + +// ChildrenKeys returns the list of keys of the children of the given node, if it is a map +func (n *innerNodeImpl) ChildrenKeys() ([]string, error) { + structType := n.val.Type() + keys := make([]string, 0, n.val.NumField()) + for i := 0; i < structType.NumField(); i++ { + f := structType.Field(i) + ch, _ := utf8.DecodeRuneInString(f.Name) + if unicode.IsLower(ch) { + continue + } + keys = append(keys, fieldNameToKey(f)) + } + return keys, nil +} + +// GetChild returns the child node at the given key, or an error if not found +func (n *innerMapNodeImpl) GetChild(key string) (node, error) { + inner := n.val.MapIndex(reflect.ValueOf(key)) + if !inner.IsValid() { + return nil, errNotFound + } + if inner.Kind() == reflect.Interface { + inner = inner.Elem() + } + return newNode(inner) +} + +// ChildrenKeys returns the list of keys of the children of the given node, if it is a map +func (n *innerMapNodeImpl) ChildrenKeys() ([]string, error) { + mapkeys := n.val.MapKeys() + keys := make([]string, 0, len(mapkeys)) + for _, kv := range mapkeys { + if kstr, ok := kv.Interface().(string); ok { + keys = append(keys, kstr) + } else { + return nil, fmt.Errorf("map node has invalid non-string key: %v", kv) + } + } + return keys, nil +} + +// GetChild returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) GetChild(string) (node, error) { + return nil, fmt.Errorf("arrayNodeImpl.GetChild not implemented") +} + +// ChildrenKeys returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("arrayNodeImpl.ChildrenKeys not implemented") +} + +// Size returns number of children in the list +func (n *arrayNodeImpl) Size() int { + return n.val.Len() +} + +// Index returns the kth element of the list +func (n *arrayNodeImpl) Index(k int) (node, error) { + // arrayNodeImpl assumes val is an Array with Len() and Index() + elem := n.val.Index(k) + if elem.Kind() == reflect.Interface { + elem = elem.Elem() + } + return newNode(elem) +} + +// GetChild returns an error because a leaf has no children +func (n *leafNodeImpl) GetChild(string) (node, error) { + return nil, fmt.Errorf("can't GetChild of a leaf node") +} + +// ChildrenKeys returns an error because a leaf has no children +func (n *leafNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("can't get ChildrenKeys of a leaf node") +} + +// GetBool returns the scalar as a bool, or an error otherwise +func (n *leafNodeImpl) GetBool() (bool, error) { + if n.val.Kind() == reflect.Bool { + return n.val.Bool(), nil + } else if n.val.Kind() == reflect.String { + return convertToBool(n.val.String()) + } + return false, newConversionError(n.val, "bool") +} + +// GetInt returns the scalar as a int, or an error otherwise +func (n *leafNodeImpl) GetInt() (int, error) { + switch n.val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int(n.val.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int(n.val.Uint()), nil + 
case reflect.Float32, reflect.Float64: + return int(n.val.Float()), nil + } + return 0, newConversionError(n.val, "int") +} + +// GetFloat returns the scalar as a float64, or an error otherwise +func (n *leafNodeImpl) GetFloat() (float64, error) { + switch n.val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(n.val.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(n.val.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(n.val.Float()), nil + } + return 0, newConversionError(n.val, "float") +} + +// GetString returns the scalar as a string, or an error otherwise +func (n *leafNodeImpl) GetString() (string, error) { + if n.val.Kind() == reflect.String { + return n.val.String(), nil + } + return "", newConversionError(n.val, "string") +} + +// convert a string to a bool using standard yaml constants +func convertToBool(text string) (bool, error) { + lower := strings.ToLower(text) + if lower == "y" || lower == "yes" || lower == "on" || lower == "true" || lower == "1" { + return true, nil + } else if lower == "n" || lower == "no" || lower == "off" || lower == "false" || lower == "0" { + return false, nil + } + return false, newConversionError(reflect.ValueOf(text), "bool") +} + +func fieldNameToKey(field reflect.StructField) string { + name := field.Name + if tagtext := field.Tag.Get("yaml"); tagtext != "" { + name = tagtext + } else if tagtext := field.Tag.Get("json"); tagtext != "" { + name = tagtext + } else if tagtext := field.Tag.Get("mapstructure"); tagtext != "" { + name = tagtext + } + // skip any additional specifiers such as ",omitempty" + if commaPos := strings.IndexRune(name, ','); commaPos != -1 { + name = name[:commaPos] + } + return name +} + +func copyStruct(target reflect.Value, source node) error { + targetType := target.Type() + for i := 0; i < targetType.NumField(); i++ { + f := targetType.Field(i) + ch, _ := utf8.DecodeRuneInString(f.Name) + if unicode.IsLower(ch) { + continue + } + child, err := source.GetChild(fieldNameToKey(f)) + if err == errNotFound { + continue + } else if err != nil { + return err + } + err = copyAny(target.FieldByName(f.Name), child) + if err != nil { + return err + } + } + return nil +} + +func copyMap(target reflect.Value, source node) error { + // TODO: Should handle maps with more complex types in a future PR + ktype := reflect.TypeOf("") + vtype := reflect.TypeOf("") + mtype := reflect.MapOf(ktype, vtype) + results := reflect.MakeMap(mtype) + + mapKeys, err := source.ChildrenKeys() + if err != nil { + return err + } + for _, mkey := range mapKeys { + child, err := source.GetChild(mkey) + if err != nil { + return err + } + if child == nil { + continue + } + if scalar, ok := child.(leafNode); ok { + if mval, err := scalar.GetString(); err == nil { + results.SetMapIndex(reflect.ValueOf(mkey), reflect.ValueOf(mval)) + } else { + return fmt.Errorf("TODO: only map[string]string supported currently") + } + } + } + target.Set(results) + return nil +} + +func copyLeaf(target reflect.Value, source leafNode) error { + if source == nil { + return fmt.Errorf("source value is not a scalar") + } + switch target.Kind() { + case reflect.Bool: + v, err := source.GetBool() + if err != nil { + return err + } + target.SetBool(v) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := source.GetInt() + if err != nil { + return err + } + target.SetInt(int64(v)) + return nil + case 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v, err := source.GetInt() + if err != nil { + return err + } + target.SetUint(uint64(v)) + return nil + case reflect.Float32, reflect.Float64: + v, err := source.GetFloat() + if err != nil { + return err + } + target.SetFloat(float64(v)) + return nil + case reflect.String: + v, err := source.GetString() + if err != nil { + return err + } + target.SetString(v) + return nil + } + return fmt.Errorf("unsupported scalar type %v", target.Kind()) +} + +func copyList(target reflect.Value, source arrayNode) error { + if source == nil { + return fmt.Errorf("source value is not a list") + } + elemType := target.Type() + elemType = elemType.Elem() + numElems := source.Size() + results := reflect.MakeSlice(reflect.SliceOf(elemType), numElems, numElems) + for k := 0; k < numElems; k++ { + elemSource, err := source.Index(k) + if err != nil { + return err + } + ptrOut := reflect.New(elemType) + outTarget := ptrOut.Elem() + err = copyAny(outTarget, elemSource) + if err != nil { + return err + } + results.Index(k).Set(outTarget) + } + target.Set(results) + return nil +} + +func copyAny(target reflect.Value, source node) error { + if target.Kind() == reflect.Pointer { + allocPtr := reflect.New(target.Type().Elem()) + target.Set(allocPtr) + target = allocPtr.Elem() + } + if isScalarKind(target) { + if leaf, ok := source.(leafNode); ok { + return copyLeaf(target, leaf) + } + return fmt.Errorf("can't copy into target: scalar required, but source is not a leaf") + } else if target.Kind() == reflect.Map { + return copyMap(target, source) + } else if target.Kind() == reflect.Struct { + return copyStruct(target, source) + } else if target.Kind() == reflect.Slice { + if arr, ok := source.(arrayNode); ok { + return copyList(target, arr) + } + return fmt.Errorf("can't copy into target: []T required, but source is not an array") + } else if target.Kind() == reflect.Invalid { + return fmt.Errorf("can't copy invalid value %s : %v", target, target.Kind()) + } + return fmt.Errorf("unknown value to copy: %v", target.Type()) +} + +func isScalarKind(v reflect.Value) bool { + k := v.Kind() + return (k >= reflect.Bool && k <= reflect.Float64) || k == reflect.String +} + +func findFieldMatch(val reflect.Value, key string) int { + schema := val.Type() + for i := 0; i < schema.NumField(); i++ { + if key == fieldNameToKey(schema.Field(i)) { + return i + } + } + return -1 +} + +func newConversionError(v reflect.Value, expectType string) error { + return fmt.Errorf("could not convert to %s: %v of type %T and Kind %v", expectType, v, v, v.Kind()) +} diff --git a/pkg/config/structure/unmarshal_test.go b/pkg/config/structure/unmarshal_test.go new file mode 100644 index 0000000000000..3f8546020811f --- /dev/null +++ b/pkg/config/structure/unmarshal_test.go @@ -0,0 +1,154 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package structure + +import ( + "reflect" + "testing" + + "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/stretchr/testify/assert" +) + +// Struct that is used within the config +type UserV3 struct { + Username string `yaml:"user"` + UsernameLegacy string `yaml:"username"` + AuthKey string `yaml:"authKey"` + AuthProtocol string `yaml:"authProtocol"` + PrivKey string `yaml:"privKey"` + PrivProtocol string `yaml:"privProtocol"` +} + +// Type that gets parsed out of config +type TrapsConfig struct { + Enabled bool `yaml:"enabled"` + Port uint16 `yaml:"port"` + Users []UserV3 `yaml:"users"` + CommunityStrings []string `yaml:"community_strings"` + BindHost string `yaml:"bind_host"` + StopTimeout int `yaml:"stop_timeout"` + Namespace string `yaml:"namespace"` +} + +func TestUnmarshalKeyTrapsConfig(t *testing.T) { + confYaml := ` +network_devices: + snmp_traps: + enabled: true + port: 1234 + community_strings: ["a","b","c"] + users: + - user: alice + authKey: hunter2 + authProtocol: MD5 + privKey: pswd + privProtocol: AE5 + - user: bob + authKey: "123456" + authProtocol: MD5 + privKey: secret + privProtocol: AE5 + bind_host: ok + stop_timeout: 4 + namespace: abc +` + mockConfig := mock.NewFromYAML(t, confYaml) + + var trapsCfg = TrapsConfig{} + err := UnmarshalKey(mockConfig, "network_devices.snmp_traps", &trapsCfg) + assert.NoError(t, err) + + assert.Equal(t, trapsCfg.Enabled, true) + assert.Equal(t, trapsCfg.Port, uint16(1234)) + assert.Equal(t, trapsCfg.CommunityStrings, []string{"a", "b", "c"}) + + assert.Equal(t, len(trapsCfg.Users), 2) + assert.Equal(t, trapsCfg.Users[0].Username, "alice") + assert.Equal(t, trapsCfg.Users[0].AuthKey, "hunter2") + assert.Equal(t, trapsCfg.Users[0].AuthProtocol, "MD5") + assert.Equal(t, trapsCfg.Users[0].PrivKey, "pswd") + assert.Equal(t, trapsCfg.Users[0].PrivProtocol, "AE5") + assert.Equal(t, trapsCfg.Users[1].Username, "bob") + assert.Equal(t, trapsCfg.Users[1].AuthKey, "123456") + assert.Equal(t, trapsCfg.Users[1].AuthProtocol, "MD5") + assert.Equal(t, trapsCfg.Users[1].PrivKey, "secret") + assert.Equal(t, trapsCfg.Users[1].PrivProtocol, "AE5") + + assert.Equal(t, trapsCfg.BindHost, "ok") + assert.Equal(t, trapsCfg.StopTimeout, 4) + assert.Equal(t, trapsCfg.Namespace, "abc") +} + +type Endpoint struct { + Name string `yaml:"name"` + APIKey string `yaml:"apikey"` +} + +func TestUnmarshalKeySliceOfStructures(t *testing.T) { + confYaml := ` +endpoints: +- name: intake + apikey: abc1 +- name: config + apikey: abc2 +- name: health + apikey: abc3 +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("endpoints") + + var endpoints = []Endpoint{} + err := UnmarshalKey(mockConfig, "endpoints", &endpoints) + assert.NoError(t, err) + + assert.Equal(t, len(endpoints), 3) + assert.Equal(t, endpoints[0].Name, "intake") + assert.Equal(t, endpoints[0].APIKey, "abc1") + assert.Equal(t, endpoints[1].Name, "config") + assert.Equal(t, endpoints[1].APIKey, "abc2") + assert.Equal(t, endpoints[2].Name, "health") + assert.Equal(t, endpoints[2].APIKey, "abc3") +} + +type FeatureConfig struct { + Enabled bool `yaml:"enabled"` +} + +func TestUnmarshalKeyParseStringAsBool(t *testing.T) { + confYaml := ` +feature: + enabled: "true" +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("feature") + + var feature = FeatureConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err) + + assert.Equal(t, feature.Enabled, true) +} + +func TestMapGetChildNotFound(t *testing.T) { + m := map[string]string{"a": 
"apple", "b": "banana"} + n, err := newNode(reflect.ValueOf(m)) + assert.NoError(t, err) + + val, err := n.GetChild("a") + assert.NoError(t, err) + str, err := val.(leafNode).GetString() + assert.NoError(t, err) + assert.Equal(t, str, "apple") + + _, err = n.GetChild("c") + assert.Error(t, err) + assert.Equal(t, err.Error(), "not found") + + keys, err := n.ChildrenKeys() + assert.NoError(t, err) + assert.Equal(t, keys, []string{"a", "b"}) +} From be05ca5bd5be766249a6b1b4a5ef632ba325641a Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 6 Sep 2024 19:38:03 +0200 Subject: [PATCH 064/128] [SBOM] Collect host SBOM in flare (#28994) Co-authored-by: Maxime Mouial --- .../collector/collectorimpl/collector.go | 36 +++++++++++++++++++ pkg/sbom/collectors/host/collector.go | 5 +++ pkg/sbom/collectors/host/request.go | 18 +++------- pkg/sbom/sbom.go | 23 +++--------- pkg/sbom/scanner/scanner.go | 14 +++++--- pkg/sbom/types/types.go | 34 ++++++++++++++++++ 6 files changed, 95 insertions(+), 35 deletions(-) create mode 100644 pkg/sbom/types/types.go diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go index e265871190fef..5b262c93d0223 100644 --- a/comp/collector/collector/collectorimpl/collector.go +++ b/comp/collector/collector/collectorimpl/collector.go @@ -8,7 +8,9 @@ package collectorimpl import ( "context" + "encoding/json" "fmt" + "os" "sync" "time" @@ -20,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/collector/collector" "github.com/DataDog/datadog-agent/comp/collector/collector/collectorimpl/internal/middleware" "github.com/DataDog/datadog-agent/comp/core/config" + flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" metadata "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" @@ -30,6 +33,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/runner" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" + "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" + "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/serializer" collectorStatus "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -81,6 +86,7 @@ type provides struct { StatusProvider status.InformationProvider MetadataProvider metadata.Provider APIGetPyStatus api.AgentEndpointProvider + FlareProvider flaretypes.Provider } // Module defines the fx options for this component. 
@@ -106,6 +112,7 @@ func newProvides(deps dependencies) provides { StatusProvider: status.NewInformationProvider(collectorStatus.Provider{}), MetadataProvider: agentCheckMetadata, APIGetPyStatus: api.NewAgentEndpointProvider(getPythonStatus, "/py/status", "GET"), + FlareProvider: flaretypes.NewProvider(c.fillFlare), } } @@ -132,6 +139,35 @@ func newCollector(deps dependencies) *collectorImpl { return c } +// fillFlare collects all the information related to integrations that need to be added to each flare +func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error { + scanner := scanner.GetGlobalScanner() + if scanner == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + scanRequest := host.NewScanRequest("/", os.DirFS("/")) + scanResult := scanner.PerformScan(ctx, scanRequest, scanner.GetCollector(scanRequest.Collector())) + if scanResult.Error != nil { + return scanResult.Error + } + + cycloneDX, err := scanResult.Report.ToCycloneDX() + if err != nil { + return err + } + + jsonContent, err := json.MarshalIndent(cycloneDX, "", " ") + if err != nil { + return err + } + + return fb.AddFile("host-sbom.json", jsonContent) +} + // AddEventReceiver adds a callback to the collector to be called each time a check is added or removed. func (c *collectorImpl) AddEventReceiver(cb collector.EventReceiver) { c.m.Lock() diff --git a/pkg/sbom/collectors/host/collector.go b/pkg/sbom/collectors/host/collector.go index b5d1a5a992a97..b92d8ad7360f2 100644 --- a/pkg/sbom/collectors/host/collector.go +++ b/pkg/sbom/collectors/host/collector.go @@ -35,6 +35,11 @@ func (c *Collector) Shutdown() { c.closed = true } +// channelSize defines the result channel size +// It doesn't need more than 1 because the host collector should +// not trigger multiple scans at the same time unlike for container-images. +const channelSize = 1 + func init() { collectors.RegisterCollector(collectors.HostCollector, &Collector{ resChan: make(chan sbom.ScanResult, channelSize), diff --git a/pkg/sbom/collectors/host/request.go b/pkg/sbom/collectors/host/request.go index 8ecd73f19a20e..3b3481f7c2633 100644 --- a/pkg/sbom/collectors/host/request.go +++ b/pkg/sbom/collectors/host/request.go @@ -3,22 +3,14 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build trivy || (windows && wmi) - package host import ( "io/fs" - "github.com/DataDog/datadog-agent/pkg/sbom" - "github.com/DataDog/datadog-agent/pkg/sbom/collectors" + "github.com/DataDog/datadog-agent/pkg/sbom/types" ) -// channelSize defines the result channel size -// It doesn't need more than 1 because the host collector should -// not trigger multiple scans at the same time unlike for container-images. -const channelSize = 1 - // scanRequest defines a scan request. This struct should be // hashable to be pushed in the work queue for processing. 
type scanRequest struct { @@ -27,18 +19,18 @@ type scanRequest struct { } // NewScanRequest creates a new scan request -func NewScanRequest(path string, fs fs.FS) sbom.ScanRequest { +func NewScanRequest(path string, fs fs.FS) types.ScanRequest { return scanRequest{Path: path, FS: fs} } // Collector returns the collector name func (r scanRequest) Collector() string { - return collectors.HostCollector + return "host" } // Type returns the scan request type -func (r scanRequest) Type(sbom.ScanOptions) string { - return sbom.ScanFilesystemType +func (r scanRequest) Type(types.ScanOptions) string { + return types.ScanFilesystemType } // ID returns the scan request ID diff --git a/pkg/sbom/sbom.go b/pkg/sbom/sbom.go index 551547d870a3a..accf0e8eb702b 100644 --- a/pkg/sbom/sbom.go +++ b/pkg/sbom/sbom.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/sbom/types" cyclonedxgo "github.com/CycloneDX/cyclonedx-go" ) @@ -26,19 +27,6 @@ type Report interface { ID() string } -// ScanOptions defines the scan options -type ScanOptions struct { - Analyzers []string - CheckDiskUsage bool - MinAvailableDisk uint64 - Timeout time.Duration - WaitAfter time.Duration - Fast bool - CollectFiles bool - UseMount bool - OverlayFsScan bool -} - // ScanOptionsFromConfig loads the scanning options from the configuration func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts ScanOptions) { if containers { @@ -59,11 +47,10 @@ func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts Scan } // ScanRequest defines the scan request interface -type ScanRequest interface { - Collector() string - Type(ScanOptions) string - ID() string -} +type ScanRequest = types.ScanRequest + +// ScanOptions defines the scan options +type ScanOptions = types.ScanOptions // ScanResult defines the scan result type ScanResult struct { diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go index bf4e25676d723..19d25a01c808a 100644 --- a/pkg/sbom/scanner/scanner.go +++ b/pkg/sbom/scanner/scanner.go @@ -224,6 +224,11 @@ func (s *Scanner) startScanRequestHandler(ctx context.Context) { }() } +// GetCollector returns the collector with the specified name +func (s *Scanner) GetCollector(collector string) collectors.Collector { + return s.collectors[collector] +} + func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) { request, ok := r.(sbom.ScanRequest) if !ok { @@ -232,8 +237,8 @@ func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) { return } - collector, ok := s.collectors[request.Collector()] - if !ok { + collector := s.GetCollector(request.Collector()) + if collector == nil { _ = log.Errorf("invalid collector '%s'", request.Collector()) s.scanQueue.Forget(request) return @@ -276,7 +281,7 @@ func (s *Scanner) processScan(ctx context.Context, request sbom.ScanRequest, img if result == nil { scanContext, cancel := context.WithTimeout(ctx, timeout(collector)) defer cancel() - result = s.performScan(scanContext, request, collector) + result = s.PerformScan(scanContext, request, collector) errorType = "scan" } sendResult(ctx, request.ID(), result, collector) @@ -299,7 +304,8 @@ func (s *Scanner) checkDiskSpace(imgMeta *workloadmeta.ContainerImageMetadata, c return result } -func (s *Scanner) performScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult { +// PerformScan 
processes a scan request with the selected collector and returns the SBOM +func (s *Scanner) PerformScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult { createdAt := time.Now() s.cacheMutex.Lock() diff --git a/pkg/sbom/types/types.go b/pkg/sbom/types/types.go new file mode 100644 index 0000000000000..8b5989cd5ed23 --- /dev/null +++ b/pkg/sbom/types/types.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package types holds sbom related types +package types + +import "time" + +// ScanRequest defines the scan request interface +type ScanRequest interface { + Collector() string + Type(ScanOptions) string + ID() string +} + +// ScanOptions defines the scan options +type ScanOptions struct { + Analyzers []string + CheckDiskUsage bool + MinAvailableDisk uint64 + Timeout time.Duration + WaitAfter time.Duration + Fast bool + CollectFiles bool + UseMount bool + OverlayFsScan bool +} + +const ( + ScanFilesystemType = "filesystem" // ScanFilesystemType defines the type for file-system scan + ScanDaemonType = "daemon" // ScanDaemonType defines the type for daemon scan +) From 8da9184f4f4851fb6e8ce1dae208c052f11c8553 Mon Sep 17 00:00:00 2001 From: Daniel Tafoya <63120739+daniel-taf@users.noreply.github.com> Date: Fri, 6 Sep 2024 13:42:13 -0400 Subject: [PATCH 065/128] [PROCS-4328] Add ECS EC2 e2e test for process checks in the core agent. (#29095) --- test/new-e2e/tests/process/ecs_test.go | 39 ++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/test/new-e2e/tests/process/ecs_test.go b/test/new-e2e/tests/process/ecs_test.go index 84ee94ec56d2d..10c42d696a512 100644 --- a/test/new-e2e/tests/process/ecs_test.go +++ b/test/new-e2e/tests/process/ecs_test.go @@ -6,6 +6,7 @@ package process import ( + "fmt" "testing" "time" @@ -25,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/ecs" ) -type ECSSuite struct { +type ECSEC2Suite struct { e2e.BaseSuite[ecsCPUStressEnv] } @@ -33,7 +34,7 @@ type ecsCPUStressEnv struct { environments.ECS } -func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { +func ecsEC2CPUStressProvisioner(runInCoreAgent bool) e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { return func(ctx *pulumi.Context, env *ecsCPUStressEnv) error { awsEnv, err := aws.NewEnvironment(ctx) if err != nil { @@ -45,6 +46,7 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { ecs.WithECSLinuxECSOptimizedNodeGroup(), ecs.WithAgentOptions( ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED", "true"), + ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED", fmt.Sprintf("%t", runInCoreAgent)), ), ecs.WithWorkloadApp(func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error) { return cpustress.EcsAppDefinition(e, clusterArn) @@ -59,16 +61,16 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { } } -func TestECSTestSuite(t *testing.T) { +func TestECSEC2TestSuite(t *testing.T) { t.Parallel() - s := ECSSuite{} + s := ECSEC2Suite{} e2eParams := []e2e.SuiteOption{e2e.WithProvisioner( - e2e.NewTypedPulumiProvisioner("ecsCPUStress", ecsCPUStressProvisioner(), nil))} + e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", 
ecsEC2CPUStressProvisioner(false), nil))} e2e.Run(t, &s, e2eParams...) } -func (s *ECSSuite) TestECSProcessCheck() { +func (s *ECSEC2Suite) TestProcessCheck() { t := s.T() // PROCS-4219 flake.Mark(t) @@ -86,3 +88,28 @@ func (s *ECSSuite) TestECSProcessCheck() { assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]") assertContainersCollected(t, payloads, []string{"stress-ng"}) } + +func (s *ECSEC2Suite) TestProcessCheckInCoreAgent() { + t := s.T() + // PROCS-4219 + flake.Mark(t) + + s.UpdateEnv(e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", ecsEC2CPUStressProvisioner(true), nil)) + + // Flush fake intake to remove any payloads which may have + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + + var payloads []*aggregator.ProcessPayload + assert.EventuallyWithT(t, func(c *assert.CollectT) { + var err error + payloads, err = s.Env().FakeIntake.Client().GetProcesses() + assert.NoError(c, err, "failed to get process payloads from fakeintake") + + // Wait for two payloads, as processes must be detected in two check runs to be returned + assert.GreaterOrEqual(c, len(payloads), 2, "fewer than 2 payloads returned") + }, 2*time.Minute, 10*time.Second) + + assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]") + requireProcessNotCollected(t, payloads, "process-agent") + assertContainersCollected(t, payloads, []string{"stress-ng"}) +} From f254ad22c044338f0dc69529f4380e099f46ada1 Mon Sep 17 00:00:00 2001 From: andrewqian2001datadog Date: Fri, 6 Sep 2024 13:43:42 -0400 Subject: [PATCH 066/128] Enable v2 AD label support for logs (#28767) --- .../common/utils/pod_annotations.go | 4 ++++ .../common/utils/pod_annotations_test.go | 19 +++++++++++++++++++ ...d-label-support-logs-24ddb721e3f429fe.yaml | 12 ++++++++++++ 3 files changed, 35 insertions(+) create mode 100644 releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml diff --git a/comp/core/autodiscovery/common/utils/pod_annotations.go b/comp/core/autodiscovery/common/utils/pod_annotations.go index d498245beb39f..2db5b0c323305 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations.go @@ -58,6 +58,7 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf Name string `json:"name"` InitConfig json.RawMessage `json:"init_config"` Instances []interface{} `json:"instances"` + Logs json.RawMessage `json:"logs"` IgnoreAutodiscoveryTags bool `json:"ignore_autodiscovery_tags"` } @@ -83,6 +84,9 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf IgnoreAutodiscoveryTags: config.IgnoreAutodiscoveryTags, } + if len(config.Logs) > 0 { + c.LogsConfig = integration.Data(config.Logs) + } for _, i := range config.Instances { instance, err := parseJSONObjToData(i) if err != nil { diff --git a/comp/core/autodiscovery/common/utils/pod_annotations_test.go b/comp/core/autodiscovery/common/utils/pod_annotations_test.go index a583e94c422ed..7043e7c19e301 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations_test.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations_test.go @@ -428,6 +428,25 @@ func TestExtractTemplatesFromAnnotations(t *testing.T) { }, }, }, + { + name: "v2 annotations label logs", + annotations: map[string]string{ + "ad.datadoghq.com/foobar.checks": `{ + "apache": { + "logs": [{"service":"any_service","source":"any_source"}] + } + }`, + }, + adIdentifier: "foobar", + output: []integration.Config{ + { + Name: "apache", + LogsConfig: 
integration.Data("[{\"service\":\"any_service\",\"source\":\"any_source\"}]"), + ADIdentifiers: []string{adID}, + InitConfig: integration.Data("{}"), + }, + }, + }, } for _, tt := range tests { diff --git a/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml new file mode 100644 index 0000000000000..1866763d95841 --- /dev/null +++ b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Adds missing support for the logs config key to work with AD annotations V2. + From 98cdec2db05acbd3cb02979b63abcc88d809b92d Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Fri, 6 Sep 2024 19:52:31 +0200 Subject: [PATCH 067/128] discovery: Limit number of reported ports (#29113) --- .../servicediscovery/module/impl_linux.go | 16 +++++++ .../module/impl_linux_test.go | 45 ++++++++++++++++--- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index d0678edecf891..009b3ace69758 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -7,12 +7,14 @@ package module import ( "bufio" + "cmp" "errors" "fmt" "io" "net/http" "os" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -390,6 +392,10 @@ var ignoreComms = map[string]struct{}{ "docker-proxy": {}, } +// maxNumberOfPorts is the maximum number of listening ports which we report per +// service. +const maxNumberOfPorts = 50 + // getService gets information for a single service. func (s *discovery) getService(context parsingContext, pid int32) *model.Service { proc, err := customNewProcess(pid) @@ -451,6 +457,16 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service return nil } + if len(ports) > maxNumberOfPorts { + // Sort the list so that non-ephemeral ports are given preference when + // we trim the list. 
+ portCmp := func(a, b uint16) int { + return cmp.Compare(a, b) + } + slices.SortFunc(ports, portCmp) + ports = ports[:maxNumberOfPorts] + } + rss, err := getRSS(proc) if err != nil { return nil diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 8020f41750457..2b37036a8f9d8 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -20,6 +20,7 @@ import ( "path/filepath" "regexp" "runtime" + "slices" "strconv" "strings" "syscall" @@ -111,8 +112,8 @@ func getServicesMap(t *testing.T, url string) map[int]model.Service { return servicesMap } -func startTCPServer(t *testing.T, proto string) (*os.File, *net.TCPAddr) { - listener, err := net.Listen(proto, "") +func startTCPServer(t *testing.T, proto string, address string) (*os.File, *net.TCPAddr) { + listener, err := net.Listen(proto, address) require.NoError(t, err) t.Cleanup(func() { _ = listener.Close() }) tcpAddr := listener.Addr().(*net.TCPAddr) @@ -190,7 +191,7 @@ func TestBasic(t *testing.T) { expectedPorts := make(map[int]int) var startTCP = func(proto string) { - f, server := startTCPServer(t, proto) + f, server := startTCPServer(t, proto, "") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) expectedPorts[cmd.Process.Pid] = server.Port @@ -242,7 +243,7 @@ func TestPorts(t *testing.T) { var unexpectedPorts []uint16 var startTCP = func(proto string) { - serverf, server := startTCPServer(t, proto) + serverf, server := startTCPServer(t, proto, "") t.Cleanup(func() { serverf.Close() }) clientf, client := startTCPClient(t, proto, server) t.Cleanup(func() { clientf.Close() }) @@ -281,6 +282,40 @@ func TestPorts(t *testing.T) { } } +func TestPortsLimits(t *testing.T) { + url := setupDiscoveryModule(t) + + var expectedPorts []int + + var openPort = func(address string) { + serverf, server := startTCPServer(t, "tcp4", address) + t.Cleanup(func() { serverf.Close() }) + + expectedPorts = append(expectedPorts, server.Port) + } + + openPort("127.0.0.1:8081") + + for i := 0; i < maxNumberOfPorts; i++ { + openPort("") + } + + openPort("127.0.0.1:8082") + + slices.Sort(expectedPorts) + + serviceMap := getServicesMap(t, url) + pid := os.Getpid() + require.Contains(t, serviceMap, pid) + ports := serviceMap[pid].Ports + assert.Contains(t, ports, uint16(8081)) + assert.Contains(t, ports, uint16(8082)) + assert.Len(t, ports, maxNumberOfPorts) + for i := 0; i < maxNumberOfPorts-2; i++ { + assert.Contains(t, ports, uint16(expectedPorts[i])) + } +} + func TestServiceName(t *testing.T) { url := setupDiscoveryModule(t) @@ -765,7 +800,7 @@ func TestCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) - f, _ := startTCPServer(t, "tcp4") + f, _ := startTCPServer(t, "tcp4", "") defer f.Close() disableCloseOnExec(t, f) From a4c64ad5a66061a7a73166ce3212a21a4d3314e8 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Fri, 6 Sep 2024 11:22:18 -0700 Subject: [PATCH 068/128] fix oom kill cgroup name to be victim (#28939) --- .../corechecks/ebpf/c/runtime/cgroup.h | 34 +++++++++++-------- .../ebpf/c/runtime/oom-kill-kern-user.h | 12 +++---- .../corechecks/ebpf/c/runtime/oom-kill-kern.c | 20 +++++------ .../corechecks/ebpf/oomkill/oom_kill.go | 12 +++---- .../ebpf/probe/oomkill/c_types_linux.go | 20 +++++------ .../probe/oomkill/model/oom_kill_types.go | 18 +++++----- 
.../corechecks/ebpf/probe/oomkill/oom_kill.go | 22 ++++++------ .../ebpf/probe/oomkill/oom_kill_test.go | 8 ++--- pkg/ebpf/cgo/genpost.go | 2 +- ...m-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml | 11 ++++++ 10 files changed, 88 insertions(+), 71 deletions(-) create mode 100644 releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml diff --git a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h index 58c63dfa59636..124081af8be78 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h +++ b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h @@ -9,28 +9,32 @@ #include "bpf_tracing.h" #include "bpf_builtins.h" -static __always_inline int get_cgroup_name(char *buf, size_t sz) { - if (!bpf_helper_exists(BPF_FUNC_get_current_task)) { - return 0; - } +static __always_inline int get_cgroup_name_for_task(struct task_struct *task, char *buf, size_t sz) { bpf_memset(buf, 0, sz); - struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task(); + #ifdef COMPILE_CORE + enum cgroup_subsys_id___local { + memory_cgrp_id___local = 123, /* value doesn't matter */ + }; + int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local); + #else + int cgrp_id = memory_cgrp_id; + #endif -#ifdef COMPILE_CORE - enum cgroup_subsys_id___local { - memory_cgrp_id___local = 123, /* value doesn't matter */ - }; - int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local); -#else - int cgrp_id = memory_cgrp_id; -#endif - const char *name = BPF_CORE_READ(cur_tsk, cgroups, subsys[cgrp_id], cgroup, kn, name); + const char *name = BPF_CORE_READ(task, cgroups, subsys[cgrp_id], cgroup, kn, name); if (bpf_probe_read_kernel(buf, sz, name) < 0) { return 0; } - return 1; } +static __always_inline int get_cgroup_name(char *buf, size_t sz) { + if (!bpf_helper_exists(BPF_FUNC_get_current_task)) { + return 0; + } + + struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task(); + return get_cgroup_name_for_task(cur_tsk, buf, sz); +} + #endif /* defined(BPF_CGROUP_H) */ diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h index 6153c6ef6a711..c95008774b44f 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h +++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h @@ -9,14 +9,14 @@ struct oom_stats { char cgroup_name[129]; - // Pid of triggering process - __u32 pid; // Pid of killed process - __u32 tpid; - // Name of triggering process - char fcomm[TASK_COMM_LEN]; + __u32 victim_pid; + // Pid of triggering process + __u32 trigger_pid; // Name of killed process - char tcomm[TASK_COMM_LEN]; + char victim_comm[TASK_COMM_LEN]; + // Name of triggering process + char trigger_comm[TASK_COMM_LEN]; // OOM score of killed process __s64 score; // OOM score adjustment of killed process diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c index ee2f1a76523a7..35c7b4be7165d 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c +++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c @@ -25,16 +25,17 @@ * the statistics per pid */ -BPF_HASH_MAP(oom_stats, u32, struct oom_stats, 10240) +BPF_HASH_MAP(oom_stats, u64, struct oom_stats, 10240) SEC("kprobe/oom_kill_process") int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { struct oom_stats zero = {}; struct oom_stats new = {}; + u64 ts = 
bpf_ktime_get_ns(); u32 pid = bpf_get_current_pid_tgid() >> 32; - bpf_map_update_elem(&oom_stats, &pid, &zero, BPF_NOEXIST); - struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &pid); + bpf_map_update_elem(&oom_stats, &ts, &zero, BPF_NOEXIST); + struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &ts); if (!s) { return 0; } @@ -43,15 +44,14 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { // expected a pointer to stack memory. Therefore, we work on stack // variable and update the map value at the end bpf_memcpy(&new, s, sizeof(struct oom_stats)); - - new.pid = pid; - get_cgroup_name(new.cgroup_name, sizeof(new.cgroup_name)); + new.trigger_pid = pid; struct task_struct *p = (struct task_struct *)BPF_CORE_READ(oc, chosen); if (!p) { return 0; } - BPF_CORE_READ_INTO(&new.tpid, p, pid); + get_cgroup_name_for_task(p, new.cgroup_name, sizeof(new.cgroup_name)); + BPF_CORE_READ_INTO(&new.victim_pid, p, pid); BPF_CORE_READ_INTO(&new.score, oc, chosen_points); #ifdef COMPILE_CORE if (bpf_core_field_exists(p->signal->oom_score_adj)) { @@ -63,11 +63,11 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { bpf_probe_read_kernel(&new.score_adj, sizeof(new.score_adj), &sig->oom_score_adj); #endif if (bpf_helper_exists(BPF_FUNC_get_current_comm)) { - bpf_get_current_comm(new.fcomm, sizeof(new.fcomm)); + bpf_get_current_comm(new.trigger_comm, sizeof(new.trigger_comm)); } - BPF_CORE_READ_INTO(&new.tcomm, p, comm); - new.tcomm[TASK_COMM_LEN - 1] = 0; + BPF_CORE_READ_INTO(&new.victim_comm, p, comm); + new.victim_comm[TASK_COMM_LEN - 1] = 0; struct mem_cgroup *memcg = NULL; #ifdef COMPILE_CORE diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go index 75797cb5352a0..f7ff6312e769b 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go @@ -132,8 +132,8 @@ func (m *OOMKillCheck) Run() error { triggerTypeText = "This OOM kill was invoked by the system." 
} tags = append(tags, "trigger_type:"+triggerType) - tags = append(tags, "trigger_process_name:"+line.FComm) - tags = append(tags, "process_name:"+line.TComm) + tags = append(tags, "trigger_process_name:"+line.TriggerComm) + tags = append(tags, "process_name:"+line.VictimComm) // submit counter metric sender.Count("oom_kill.oom_process.count", 1, "", tags) @@ -145,7 +145,7 @@ func (m *OOMKillCheck) Run() error { SourceTypeName: CheckName, EventType: CheckName, AggregationKey: containerID, - Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.TComm, line.TPid), + Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.VictimComm, line.VictimPid), Tags: tags, } @@ -155,10 +155,10 @@ func (m *OOMKillCheck) Run() error { if line.ScoreAdj != 0 { oomScoreAdj = fmt.Sprintf(", oom_score_adj: %d", line.ScoreAdj) } - if line.Pid == line.TPid { - fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.FComm, line.Pid, line.Score, oomScoreAdj) + if line.VictimPid == line.TriggerPid { + fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.VictimComm, line.VictimPid, line.Score, oomScoreAdj) } else { - fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.FComm, line.Pid, line.TComm, line.TPid, line.Score, oomScoreAdj) + fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.TriggerComm, line.TriggerPid, line.VictimComm, line.VictimPid, line.Score, oomScoreAdj) } fmt.Fprintf(&b, "\n The process had reached %d pages in size. \n\n", line.Pages) b.WriteString(triggerTypeText) diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go index 062036afd3cb7..9637c6f16adcf 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go @@ -4,14 +4,14 @@ package oomkill type oomStats struct { - Cgroup_name [129]byte - Pid uint32 - Tpid uint32 - Fcomm [16]byte - Tcomm [16]byte - Score int64 - Score_adj int16 - Pages uint64 - Memcg_oom uint32 - Pad_cgo_0 [4]byte + Cgroup_name [129]byte + Victim_pid uint32 + Trigger_pid uint32 + Victim_comm [16]byte + Trigger_comm [16]byte + Score int64 + Score_adj int16 + Pages uint64 + Memcg_oom uint32 + Pad_cgo_0 [4]byte } diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go index 42272d27dbf7f..f065837bb3094 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go @@ -8,13 +8,13 @@ package model // OOMKillStats contains the statistics of a given socket type OOMKillStats struct { - CgroupName string `json:"cgroupName"` - Pid uint32 `json:"pid"` - TPid uint32 `json:"tpid"` - FComm string `json:"fcomm"` - TComm string `json:"tcomm"` - Score int64 `json:"score"` - ScoreAdj int16 `json:"scoreAdj"` - Pages uint64 `json:"pages"` - MemCgOOM uint32 `json:"memcgoom"` + CgroupName string `json:"cgroupName"` + VictimPid uint32 `json:"victimPid"` + TriggerPid uint32 `json:"triggerPid"` + VictimComm string `json:"victimComm"` + TriggerComm string `json:"triggerComm"` + Score int64 `json:"score"` + ScoreAdj int16 `json:"scoreAdj"` + Pages uint64 `json:"pages"` + MemCgOOM uint32 
`json:"memcgoom"` } diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go index fc9e27a78a31a..e277b3f48381b 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go @@ -34,7 +34,7 @@ const oomMapName = "oom_stats" // Probe is the eBPF side of the OOM Kill check type Probe struct { m *manager.Manager - oomMap *maps.GenericMap[uint32, oomStats] + oomMap *maps.GenericMap[uint64, oomStats] } // NewProbe creates a [Probe] @@ -117,7 +117,7 @@ func startOOMKillProbe(buf bytecode.AssetReader, managerOptions manager.Options) return nil, fmt.Errorf("failed to start manager: %w", err) } - oomMap, err := maps.GetMap[uint32, oomStats](m, oomMapName) + oomMap, err := maps.GetMap[uint64, oomStats](m, oomMapName) if err != nil { return nil, fmt.Errorf("failed to get map '%s': %w", oomMapName, err) } @@ -139,19 +139,21 @@ func (k *Probe) Close() { // GetAndFlush gets the stats func (k *Probe) GetAndFlush() (results []model.OOMKillStats) { - var pid uint32 + var allTimestamps []uint64 + var ts uint64 var stat oomStats it := k.oomMap.Iterate() - for it.Next(&pid, &stat) { + for it.Next(&ts, &stat) { results = append(results, convertStats(stat)) + allTimestamps = append(allTimestamps, ts) } if err := it.Err(); err != nil { log.Warnf("failed to iterate on OOM stats while flushing: %s", err) } - for _, r := range results { - if err := k.oomMap.Delete(&r.Pid); err != nil { + for _, ts := range allTimestamps { + if err := k.oomMap.Delete(&ts); err != nil { log.Warnf("failed to delete stat: %s", err) } } @@ -161,12 +163,12 @@ func (k *Probe) GetAndFlush() (results []model.OOMKillStats) { func convertStats(in oomStats) (out model.OOMKillStats) { out.CgroupName = unix.ByteSliceToString(in.Cgroup_name[:]) - out.Pid = in.Pid - out.TPid = in.Tpid + out.VictimPid = in.Victim_pid + out.TriggerPid = in.Trigger_pid out.Score = in.Score out.ScoreAdj = in.Score_adj - out.FComm = unix.ByteSliceToString(in.Fcomm[:]) - out.TComm = unix.ByteSliceToString(in.Tcomm[:]) + out.VictimComm = unix.ByteSliceToString(in.Victim_comm[:]) + out.TriggerComm = unix.ByteSliceToString(in.Trigger_comm[:]) out.Pages = in.Pages out.MemCgOOM = in.Memcg_oom return diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go index 96f7b67bd043b..02e76e112b679 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go @@ -92,7 +92,7 @@ func TestOOMKillProbe(t *testing.T) { var result model.OOMKillStats require.Eventually(t, func() bool { for _, r := range oomKillProbe.GetAndFlush() { - if r.TPid == uint32(cmd.Process.Pid) { + if r.TriggerPid == uint32(cmd.Process.Pid) { result = r return true } @@ -101,11 +101,11 @@ func TestOOMKillProbe(t *testing.T) { }, 10*time.Second, 500*time.Millisecond, "failed to find an OOM killed process with pid %d", cmd.Process.Pid) assert.Regexp(t, regexp.MustCompile("run-([0-9|a-z]*).scope"), result.CgroupName, "cgroup name") - assert.Equal(t, result.TPid, result.Pid, "tpid == pid") + assert.Equal(t, result.TriggerPid, result.VictimPid, "tpid == pid") assert.NotZero(t, result.Score, "score") assert.Equal(t, int16(42), result.ScoreAdj, "score adj") - assert.Equal(t, "dd", result.FComm, "fcomm") - assert.Equal(t, "dd", result.TComm, "tcomm") + assert.Equal(t, "dd", result.VictimComm, "victim comm") + assert.Equal(t, "dd", 
result.TriggerComm, "trigger comm") assert.NotZero(t, result.Pages, "pages") assert.Equal(t, uint32(1), result.MemCgOOM, "memcg oom") }) diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 65c357c2e6623..feb69e6ac5e04 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -25,7 +25,7 @@ func main() { // Convert []int8 to []byte in multiple generated fields from the kernel, to simplify // conversion to string; see golang.org/issue/20753 - convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Fcomm|Tcomm)(\s+)\[(\d+)\]u?int8`) + convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) b, err = format.Source(b) diff --git a/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml new file mode 100644 index 0000000000000..e5bf2a5ef0573 --- /dev/null +++ b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + OOM Kill Check now reports the cgroup name of the victim process rather than the triggering process. From f627a34c87c60b6a606edb8e90c7c7500e00302d Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Fri, 6 Sep 2024 12:24:54 -0700 Subject: [PATCH 069/128] remove unneeded Address functions (#28175) --- pkg/ebpf/cgo/genpost.go | 2 +- pkg/network/dns/cache_test.go | 3 +- pkg/network/driver/types_windows.go | 4 +- pkg/network/event_common.go | 11 ++--- pkg/network/event_windows.go | 10 ++-- pkg/network/gateway_lookup_linux.go | 2 +- pkg/network/nat.go | 4 +- pkg/network/netlink/conntracker.go | 29 +++--------- pkg/network/netlink/decoding.go | 4 +- .../protocols/http/etw_http_service.go | 2 +- pkg/network/protocols/http/model_linux.go | 2 +- .../protocols/http/statkeeper_test_linux.go | 4 +- .../protocols/http/statkeeper_test_windows.go | 5 +- .../protocols/kafka/statkeeper_test.go | 4 +- .../protocols/postgres/model_linux_test.go | 2 +- pkg/network/route_cache.go | 6 +-- pkg/network/tracer/cached_conntrack.go | 13 +---- .../tracer/connection/util/conn_tracer.go | 4 +- pkg/process/util/address.go | 47 +------------------ pkg/process/util/address_test.go | 43 ----------------- pkg/util/winutil/iphelper/routes.go | 17 ------- 21 files changed, 43 insertions(+), 175 deletions(-) diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index feb69e6ac5e04..f280dbe88ad73 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -25,7 +25,7 @@ func main() { // Convert []int8 to []byte in multiple generated fields from the kernel, to simplify // conversion to string; see golang.org/issue/20753 - convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm)(\s+)\[(\d+)\]u?int8`) + convertInt8ArrayToByteArrayRegex := 
regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm|LocalAddress|RemoteAddress)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) b, err = format.Source(b) diff --git a/pkg/network/dns/cache_test.go b/pkg/network/dns/cache_test.go index bb025deec57e8..b56e0359b6591 100644 --- a/pkg/network/dns/cache_test.go +++ b/pkg/network/dns/cache_test.go @@ -11,6 +11,7 @@ import ( cryptorand "crypto/rand" "fmt" "math/rand" + "net/netip" "strings" "testing" "time" @@ -329,7 +330,7 @@ func randomAddressGen() func() util.Address { continue } - return util.V4AddressFromBytes(b) + return util.Address{Addr: netip.AddrFrom4([4]byte(b))} } } } diff --git a/pkg/network/driver/types_windows.go b/pkg/network/driver/types_windows.go index 184464cd0965f..1dbeea0cb9197 100644 --- a/pkg/network/driver/types_windows.go +++ b/pkg/network/driver/types_windows.go @@ -114,8 +114,8 @@ type PerFlowData struct { AddressFamily uint16 Protocol uint16 Flags uint32 - LocalAddress [16]uint8 - RemoteAddress [16]uint8 + LocalAddress [16]byte + RemoteAddress [16]byte PacketsOut uint64 MonotonicSentBytes uint64 TransportBytesOut uint64 diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go index 6908b02965048..990c9f40797b7 100644 --- a/pkg/network/event_common.go +++ b/pkg/network/event_common.go @@ -10,6 +10,7 @@ package network import ( "encoding/binary" "fmt" + "net/netip" "strings" "time" @@ -353,10 +354,8 @@ const keyFmt = "p:%d|src:%s:%d|dst:%s:%d|f:%d|t:%d" // Note: This is only used in /debug/* endpoints func BeautifyKey(key string) string { bytesToAddress := func(buf []byte) util.Address { - if len(buf) == 4 { - return util.V4AddressFromBytes(buf) - } - return util.V6AddressFromBytes(buf) + addr, _ := netip.AddrFromSlice(buf) + return util.Address{Addr: addr} } raw := []byte(key) @@ -464,8 +463,8 @@ func generateConnectionKey(c ConnectionStats, buf []byte, useNAT bool) []byte { buf[n] = uint8(c.Family)<<4 | uint8(c.Type) n++ - n += laddr.WriteTo(buf[n:]) // 4 or 16 bytes - n += raddr.WriteTo(buf[n:]) // 4 or 16 bytes + n += copy(buf[n:], laddr.AsSlice()) // 4 or 16 bytes + n += copy(buf[n:], raddr.AsSlice()) // 4 or 16 bytes return buf[:n] } diff --git a/pkg/network/event_windows.go b/pkg/network/event_windows.go index ff9bed37d33f9..54ad5399f8c63 100644 --- a/pkg/network/event_windows.go +++ b/pkg/network/event_windows.go @@ -8,7 +8,7 @@ package network import ( - "net" + "net/netip" "syscall" "github.com/DataDog/datadog-agent/pkg/network/driver" @@ -57,14 +57,14 @@ func isTCPFlowEstablished(flow *driver.PerFlowData) bool { return false } -func convertV4Addr(addr [16]uint8) util.Address { +func convertV4Addr(addr [16]byte) util.Address { // We only read the first 4 bytes for v4 address - return util.V4AddressFromBytes(addr[:net.IPv4len]) + return util.Address{Addr: netip.AddrFrom4([4]byte(addr[:]))} } -func convertV6Addr(addr [16]uint8) util.Address { +func convertV6Addr(addr [16]byte) util.Address { // We read all 16 bytes for v6 address - return util.V6AddressFromBytes(addr[:net.IPv6len]) + return util.Address{Addr: netip.AddrFrom16(addr)} } // Monotonic values include retransmits and headers, while transport does not. 
We default to using transport diff --git a/pkg/network/gateway_lookup_linux.go b/pkg/network/gateway_lookup_linux.go index 48657b61c1ece..a3131f40fc085 100644 --- a/pkg/network/gateway_lookup_linux.go +++ b/pkg/network/gateway_lookup_linux.go @@ -131,7 +131,7 @@ func (g *gatewayLookup) LookupWithIPs(source util.Address, dest util.Address, ne // if there is no gateway, we don't need to add subnet info // for gateway resolution in the backend - if r.Gateway.IsZero() || r.Gateway.IsUnspecified() { + if !r.Gateway.IsValid() || r.Gateway.IsUnspecified() { return nil } diff --git a/pkg/network/nat.go b/pkg/network/nat.go index ef5194b23ffd5..e686c65ea7d83 100644 --- a/pkg/network/nat.go +++ b/pkg/network/nat.go @@ -14,7 +14,7 @@ func GetNATLocalAddress(c ConnectionStats) (util.Address, uint16) { localIP := c.Source localPort := c.SPort - if c.IPTranslation != nil && !c.IPTranslation.ReplDstIP.IsZero() { + if c.IPTranslation != nil && c.IPTranslation.ReplDstIP.IsValid() { // Fields are flipped localIP = c.IPTranslation.ReplDstIP localPort = c.IPTranslation.ReplDstPort @@ -27,7 +27,7 @@ func GetNATRemoteAddress(c ConnectionStats) (util.Address, uint16) { remoteIP := c.Dest remotePort := c.DPort - if c.IPTranslation != nil && !c.IPTranslation.ReplSrcIP.IsZero() { + if c.IPTranslation != nil && c.IPTranslation.ReplSrcIP.IsValid() { // Fields are flipped remoteIP = c.IPTranslation.ReplSrcIP remotePort = c.IPTranslation.ReplSrcPort diff --git a/pkg/network/netlink/conntracker.go b/pkg/network/netlink/conntracker.go index 9f066d2f8cb97..885e4f5900b82 100644 --- a/pkg/network/netlink/conntracker.go +++ b/pkg/network/netlink/conntracker.go @@ -12,7 +12,6 @@ import ( "context" "errors" "fmt" - "net" "net/netip" "sync" "time" @@ -194,8 +193,8 @@ func (ctr *realConntracker) GetTranslationForConn(c *network.ConnectionStats) *n defer ctr.Unlock() k := connKey{ - src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + src: netip.AddrPortFrom(c.Source.Addr, c.SPort), + dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort), transport: c.Type, } @@ -226,8 +225,8 @@ func (ctr *realConntracker) DeleteTranslation(c *network.ConnectionStats) { defer ctr.Unlock() k := connKey{ - src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + src: netip.AddrPortFrom(c.Source.Addr, c.SPort), + dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort), transport: c.Type, } @@ -453,29 +452,13 @@ func IsNAT(c Con) bool { func formatIPTranslation(tuple *ConTuple) *network.IPTranslation { return &network.IPTranslation{ - ReplSrcIP: addrFromIP(tuple.Src.Addr()), - ReplDstIP: addrFromIP(tuple.Dst.Addr()), + ReplSrcIP: util.Address{Addr: tuple.Src.Addr().Unmap()}, + ReplDstIP: util.Address{Addr: tuple.Dst.Addr().Unmap()}, ReplSrcPort: tuple.Src.Port(), ReplDstPort: tuple.Dst.Port(), } } -func addrFromIP(ip netip.Addr) util.Address { - if ip.Is6() && !ip.Is4In6() { - b := ip.As16() - return util.V6AddressFromBytes(b[:]) - } - b := ip.As4() - return util.V4AddressFromBytes(b[:]) -} - -func ipFromAddr(a util.Address) netip.Addr { - if a.Len() == net.IPv6len { - return netip.AddrFrom16(*(*[16]byte)(a.Bytes())) - } - return netip.AddrFrom4(*(*[4]byte)(a.Bytes())) -} - func formatKey(tuple *ConTuple) (k connKey, ok bool) { ok = true k.src = tuple.Src diff --git a/pkg/network/netlink/decoding.go b/pkg/network/netlink/decoding.go index 003420fb3ca90..aff7d90466fd6 100644 --- a/pkg/network/netlink/decoding.go +++ b/pkg/network/netlink/decoding.go @@ 
-201,12 +201,12 @@ func ipv4(b []byte) (netip.Addr, error) { if len(b) != 4 { return netip.Addr{}, fmt.Errorf("invalid IPv4 size") } - return netip.AddrFrom4(*(*[4]byte)(b)), nil + return netip.AddrFrom4([4]byte(b)), nil } func ipv6(b []byte) (netip.Addr, error) { if len(b) != 16 { return netip.Addr{}, fmt.Errorf("invalid IPv6 size") } - return netip.AddrFrom16(*(*[16]byte)(b)), nil + return netip.AddrFrom16([16]byte(b)), nil } diff --git a/pkg/network/protocols/http/etw_http_service.go b/pkg/network/protocols/http/etw_http_service.go index ee913ae4b7c8d..8225553e059d3 100644 --- a/pkg/network/protocols/http/etw_http_service.go +++ b/pkg/network/protocols/http/etw_http_service.go @@ -1472,7 +1472,7 @@ func ipAndPortFromTup(tup driver.ConnTupleType, local bool) ([16]uint8, uint16) } func ip4format(ip [16]uint8) string { - ipObj := netip.AddrFrom4(*(*[4]byte)(ip[:4])) + ipObj := netip.AddrFrom4([4]byte(ip[:])) return ipObj.String() } diff --git a/pkg/network/protocols/http/model_linux.go b/pkg/network/protocols/http/model_linux.go index 4855bee8a00af..75034b59fb921 100644 --- a/pkg/network/protocols/http/model_linux.go +++ b/pkg/network/protocols/http/model_linux.go @@ -110,7 +110,7 @@ func (e *EbpfEvent) String() string { func requestFragment(fragment []byte) [BufferSize]byte { if len(fragment) >= BufferSize { - return *(*[BufferSize]byte)(fragment) + return [BufferSize]byte(fragment) } var b [BufferSize]byte copy(b[:], fragment) diff --git a/pkg/network/protocols/http/statkeeper_test_linux.go b/pkg/network/protocols/http/statkeeper_test_linux.go index ba8f1aa4308a8..3a4c59dfeb675 100644 --- a/pkg/network/protocols/http/statkeeper_test_linux.go +++ b/pkg/network/protocols/http/statkeeper_test_linux.go @@ -25,9 +25,9 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP event.Http.Response_last_seen = event.Http.Request_started + latencyNS event.Http.Response_status_code = uint16(code) event.Http.Request_fragment = requestFragment([]byte(reqFragment)) - event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes())) + event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice())) event.Tuple.Sport = uint16(sourcePort) - event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes())) + event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice())) event.Tuple.Dport = uint16(destPort) event.Tuple.Metadata = 1 diff --git a/pkg/network/protocols/http/statkeeper_test_windows.go b/pkg/network/protocols/http/statkeeper_test_windows.go index ea4fdc13f00bb..8516d667d41dd 100644 --- a/pkg/network/protocols/http/statkeeper_test_windows.go +++ b/pkg/network/protocols/http/statkeeper_test_windows.go @@ -25,11 +25,10 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP tx.Txn.ResponseStatusCode = uint16(code) tx.RequestFragment = []byte(reqFragment) - source.WriteTo(tx.Txn.Tup.RemoteAddr[:]) - + copy(tx.Txn.Tup.RemoteAddr[:], source.AsSlice()) tx.Txn.Tup.RemotePort = uint16(sourcePort) - dest.WriteTo(tx.Txn.Tup.LocalAddr[:]) + copy(tx.Txn.Tup.LocalAddr[:], dest.AsSlice()) tx.Txn.Tup.LocalPort = uint16(destPort) return &tx diff --git a/pkg/network/protocols/kafka/statkeeper_test.go b/pkg/network/protocols/kafka/statkeeper_test.go index dc99ff01902d3..21170f2ec8d92 100644 --- a/pkg/network/protocols/kafka/statkeeper_test.go +++ b/pkg/network/protocols/kafka/statkeeper_test.go @@ -135,9 +135,9 @@ func generateKafkaTransaction(source util.Address, dest util.Address, sourcePort 
event.Transaction.Records_count = recordsCount event.Transaction.Topic_name_size = uint8(len(topicName)) event.Transaction.Topic_name = topicNameFromString([]byte(topicName)) - event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes())) + event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice())) event.Tup.Sport = uint16(sourcePort) - event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes())) + event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice())) event.Tup.Dport = uint16(destPort) event.Tup.Metadata = 1 diff --git a/pkg/network/protocols/postgres/model_linux_test.go b/pkg/network/protocols/postgres/model_linux_test.go index f3888f9945c7e..da81378a4928a 100644 --- a/pkg/network/protocols/postgres/model_linux_test.go +++ b/pkg/network/protocols/postgres/model_linux_test.go @@ -77,7 +77,7 @@ func BenchmarkExtractTableName(b *testing.B) { func requestFragment(fragment []byte) [ebpf.BufferSize]byte { if len(fragment) >= ebpf.BufferSize { - return *(*[ebpf.BufferSize]byte)(fragment) + return [ebpf.BufferSize]byte(fragment) } var b [ebpf.BufferSize]byte copy(b[:], fragment) diff --git a/pkg/network/route_cache.go b/pkg/network/route_cache.go index 3537a4c2678f5..44a675b0f649b 100644 --- a/pkg/network/route_cache.go +++ b/pkg/network/route_cache.go @@ -169,10 +169,10 @@ func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool) func newRouteKey(source, dest util.Address, netns uint32) routeKey { k := routeKey{netns: netns, source: source, dest: dest} - switch dest.Len() { - case 4: + switch { + case dest.Is4(): k.connFamily = AFINET - case 16: + case dest.Is6(): k.connFamily = AFINET6 } return k diff --git a/pkg/network/tracer/cached_conntrack.go b/pkg/network/tracer/cached_conntrack.go index 51c691423f840..de4f6cec213f1 100644 --- a/pkg/network/tracer/cached_conntrack.go +++ b/pkg/network/tracer/cached_conntrack.go @@ -10,7 +10,6 @@ package tracer import ( "errors" "fmt" - "net" "net/netip" "os" "sync" @@ -21,7 +20,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/netlink" - "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -65,13 +63,6 @@ func (cache *cachedConntrack) Exists(c *network.ConnectionStats) (bool, error) { return cache.exists(c, c.NetNS, int(c.Pid)) } -func ipFromAddr(a util.Address) netip.Addr { - if a.Len() == net.IPv6len { - return netip.AddrFrom16(*(*[16]byte)(a.Bytes())) - } - return netip.AddrFrom4(*(*[4]byte)(a.Bytes())) -} - func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, pid int) (bool, error) { ctrk, err := cache.ensureConntrack(uint64(netns), pid) if err != nil { @@ -89,8 +80,8 @@ func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, p conn := netlink.Con{ Origin: netlink.ConTuple{ - Src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - Dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + Src: netip.AddrPortFrom(c.Source.Unmap(), c.SPort), + Dst: netip.AddrPortFrom(c.Dest.Unmap(), c.DPort), Proto: protoNumber, }, } diff --git a/pkg/network/tracer/connection/util/conn_tracer.go b/pkg/network/tracer/connection/util/conn_tracer.go index b97bf1272ae65..724e60ec4c4eb 100644 --- a/pkg/network/tracer/connection/util/conn_tracer.go +++ b/pkg/network/tracer/connection/util/conn_tracer.go @@ -163,10 +163,10 @@ func ConnStatsToTuple(c *network.ConnectionStats, tup 
*netebpf.ConnTuple) { } else { tup.SetType(netebpf.UDP) } - if !c.Source.IsZero() { + if c.Source.IsValid() { tup.Saddr_l, tup.Saddr_h = util.ToLowHigh(c.Source) } - if !c.Dest.IsZero() { + if c.Dest.IsValid() { tup.Daddr_l, tup.Daddr_h = util.ToLowHigh(c.Dest) } } diff --git a/pkg/process/util/address.go b/pkg/process/util/address.go index 15dde9f118d23..c98964820a071 100644 --- a/pkg/process/util/address.go +++ b/pkg/process/util/address.go @@ -20,41 +20,6 @@ type Address struct { netip.Addr } -// WriteTo writes the address byte representation into the supplied buffer -func (a Address) WriteTo(b []byte) int { - if a.Is4() { - v := a.As4() - return copy(b, v[:]) - } - - v := a.As16() - return copy(b, v[:]) - -} - -// Bytes returns a byte slice representing the Address. -// You may want to consider using `WriteTo` instead to avoid allocations -func (a Address) Bytes() []byte { - // Note: this implicitly converts IPv4-in-6 to IPv4 - if a.Is4() || a.Is4In6() { - v := a.As4() - return v[:] - } - - v := a.As16() - return v[:] -} - -// Len returns the number of bytes required to represent this IP -func (a Address) Len() int { - return int(a.BitLen()) / 8 -} - -// IsZero reports whether a is its zero value -func (a Address) IsZero() bool { - return a.Addr == netip.Addr{} -} - // AddressFromNetIP returns an Address from a provided net.IP func AddressFromNetIP(ip net.IP) Address { addr, _ := netipx.FromStdIP(ip) @@ -71,7 +36,7 @@ func AddressFromString(s string) Address { // Warning: the returned `net.IP` will share the same underlying // memory as the given `buf` argument. func NetIPFromAddress(addr Address, buf []byte) net.IP { - n := addr.WriteTo(buf) + n := copy(buf, addr.AsSlice()) return net.IP(buf[:n]) } @@ -115,11 +80,6 @@ func V4Address(ip uint32) Address { } } -// V4AddressFromBytes creates an Address using the byte representation of an v4 IP -func V4AddressFromBytes(buf []byte) Address { - return Address{netip.AddrFrom4(*(*[4]byte)(buf))} -} - // V6Address creates an Address using the uint128 representation of an v6 IP func V6Address(low, high uint64) Address { var a [16]byte @@ -128,10 +88,5 @@ func V6Address(low, high uint64) Address { return Address{netip.AddrFrom16(a)} } -// V6AddressFromBytes creates an Address using the byte representation of an v6 IP -func V6AddressFromBytes(buf []byte) Address { - return Address{netip.AddrFrom16(*(*[16]byte)(buf))} -} - // IPBufferPool is meant to be used in conjunction with `NetIPFromAddress` var IPBufferPool = ddsync.NewSlicePool[byte](net.IPv6len, net.IPv6len) diff --git a/pkg/process/util/address_test.go b/pkg/process/util/address_test.go index bfbf57c312efc..a1de755961b49 100644 --- a/pkg/process/util/address_test.go +++ b/pkg/process/util/address_test.go @@ -88,22 +88,16 @@ func TestAddressUsageInMaps(t *testing.T) { func TestAddressV4(t *testing.T) { addr := V4Address(889192575) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("127.0.0.53")) assert.Equal(t, "127.0.0.53", addr.String()) addr = V4Address(0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("0.0.0.0")) assert.Equal(t, "0.0.0.0", addr.String()) addr = V4Address(16820416) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, 
V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("192.168.0.1")) assert.Equal(t, "192.168.0.1", addr.String()) @@ -111,31 +105,23 @@ func TestAddressV4(t *testing.T) { func TestAddressV6(t *testing.T) { addr := V6Address(889192575, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::7f00:35:0:0")) assert.Equal(t, "::7f00:35:0:0", addr.String()) assert.False(t, addr.IsLoopback()) addr = V6Address(0, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::")) assert.Equal(t, "::", addr.String()) addr = V6Address(72057594037927936, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::1")) assert.Equal(t, "::1", addr.String()) assert.True(t, addr.IsLoopback()) addr = V6Address(72059793061183488, 3087860000) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("2001:db8::2:1")) assert.Equal(t, "2001:db8::2:1", addr.String()) @@ -176,35 +162,6 @@ func BenchmarkV6Address(b *testing.B) { runtime.KeepAlive(addr) } -func BenchmarkBytes(b *testing.B) { - var ( - addr = AddressFromString("8.8.8.8") - bytes []byte - ) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - // this allocates a slice descriptor that escapes to the heap - bytes = addr.Bytes() - } - runtime.KeepAlive(bytes) -} - -func BenchmarkWriteTo(b *testing.B) { - addr := AddressFromString("8.8.8.8") - bytes := make([]byte, 4) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - // this method shouldn't allocate - _ = addr.WriteTo(bytes) - bytes = bytes[:0] - } - runtime.KeepAlive(bytes) -} - func BenchmarkToLowHigh(b *testing.B) { addr := AddressFromString("8.8.8.8") var l, h uint64 diff --git a/pkg/util/winutil/iphelper/routes.go b/pkg/util/winutil/iphelper/routes.go index 2de67ec872cae..08ca4f956dfe0 100644 --- a/pkg/util/winutil/iphelper/routes.go +++ b/pkg/util/winutil/iphelper/routes.go @@ -198,20 +198,3 @@ func GetIFTable() (table map[uint32]windows.MibIfRow, err error) { return table, nil } - -// Ntohs converts a network byte order 16 bit int to host byte order -func Ntohs(i uint16) uint16 { - return binary.BigEndian.Uint16((*(*[2]byte)(unsafe.Pointer(&i)))[:]) -} - -// Ntohl converts a network byte order 32 bit int to host byte order -func Ntohl(i uint32) uint32 { - return binary.BigEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&i)))[:]) -} - -// Htonl converts a host byte order 32 bit int to network byte order -func Htonl(i uint32) uint32 { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, i) - return *(*uint32)(unsafe.Pointer(&b[0])) -} From bb0760ff706a9b5cae63794a1173c5cfc913b94f Mon Sep 17 00:00:00 2001 From: Caleb Metz <135133572+cmetz100@users.noreply.github.com> Date: Fri, 6 Sep 2024 16:08:24 -0400 Subject: [PATCH 070/128] Enable sub agent tagging of target metrics collected by lading for the idle agent SMP experiment (#29123) Signed-off-by: Caleb Metz --- test/regression/cases/idle/lading/lading.yaml | 167 
+++++++++++++++++- 1 file changed, 166 insertions(+), 1 deletion(-) diff --git a/test/regression/cases/idle/lading/lading.yaml b/test/regression/cases/idle/lading/lading.yaml index 0ae7c415523d8..5e2eb2566ef45 100644 --- a/test/regression/cases/idle/lading/lading.yaml +++ b/test/regression/cases/idle/lading/lading.yaml @@ -7,5 +7,170 @@ blackhole: binding_addr: "127.0.0.1:9092" target_metrics: - - prometheus: + - prometheus: #core agent telemetry uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - "/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - "/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - "/forwarder/Transactions/HorizontalPodAutoscaler" + - "/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - 
"/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - "/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - "/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - "/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - "/validate_v1" + - "/version/Version" + - "/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" From a399ea8588d4aad1caa485f598b4a3a22dccb15e Mon Sep 17 00:00:00 2001 From: Seth Samuel Date: Fri, 6 Sep 2024 16:22:00 -0400 Subject: [PATCH 071/128] Add Oracle telemetry (#28771) Co-authored-by: Bryce Eadie --- comp/core/agenttelemetry/impl/config.go | 6 ++++- pkg/collector/corechecks/oracle/activity.go | 3 +++ 
pkg/collector/corechecks/oracle/statements.go | 2 ++ pkg/collector/corechecks/oracle/telemetry.go | 25 +++++++++++++++++++ .../oracle-telemetry-bcc1ab08a2b92bc6.yaml | 11 ++++++++ 5 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 pkg/collector/corechecks/oracle/telemetry.go create mode 100644 releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml diff --git a/comp/core/agenttelemetry/impl/config.go b/comp/core/agenttelemetry/impl/config.go index a33a440162119..3109a400b573c 100644 --- a/comp/core/agenttelemetry/impl/config.go +++ b/comp/core/agenttelemetry/impl/config.go @@ -237,7 +237,7 @@ var defaultProfiles = ` - check_name:network - check_name:io - check_name:file_handle - metrics: + metrics: - name: checks.runs aggregate_tags: - check_name @@ -257,6 +257,10 @@ var defaultProfiles = ` - name: logs.destination_http_resp aggregate_tags: - status_code + - name: oracle.activity_samples_count + - name: oracle.activity_latency + - name: oracle.statement_metrics + - name: oracle.statement_plan_errors - name: transactions.input_count - name: transactions.requeued - name: transactions.retries diff --git a/pkg/collector/corechecks/oracle/activity.go b/pkg/collector/corechecks/oracle/activity.go index 9b3ce2fa3c134..3979e1e38eff6 100644 --- a/pkg/collector/corechecks/oracle/activity.go +++ b/pkg/collector/corechecks/oracle/activity.go @@ -469,6 +469,9 @@ AND status = 'ACTIVE'`) return err } sendMetricWithDefaultTags(c, gauge, "dd.oracle.activity.time_ms", float64(time.Since(start).Milliseconds())) + TlmOracleActivityLatency.Observe(float64(time.Since(start).Milliseconds())) + TlmOracleActivitySamplesCount.Add(float64(len(sessionRows))) + sender.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/statements.go b/pkg/collector/corechecks/oracle/statements.go index bacc29624bea7..910cc06acdd73 100644 --- a/pkg/collector/corechecks/oracle/statements.go +++ b/pkg/collector/corechecks/oracle/statements.go @@ -868,8 +868,10 @@ func (c *Check) StatementMetrics() (int, error) { sender.EventPlatformEvent(payloadBytes, "dbm-metrics") sendMetricWithDefaultTags(c, gauge, "dd.oracle.statements_metrics.time_ms", float64(time.Since(start).Milliseconds())) + TlmOracleStatementMetricsLatency.Observe(float64(time.Since(start).Milliseconds())) if c.config.ExecutionPlans.Enabled { sendMetricWithDefaultTags(c, gauge, "dd.oracle.plan_errors.count", float64(planErrors)) + TlmOracleStatementMetricsErrorCount.Add(float64(planErrors)) } sender.Commit() diff --git a/pkg/collector/corechecks/oracle/telemetry.go b/pkg/collector/corechecks/oracle/telemetry.go new file mode 100644 index 0000000000000..34be6ef378d6b --- /dev/null +++ b/pkg/collector/corechecks/oracle/telemetry.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build oracle + +package oracle + +import ( + "github.com/DataDog/datadog-agent/pkg/telemetry" +) + +// These collectors gather telemetry data for cross-org analysis +// They are not expected to appear in the originiating org's metrics +var ( + // TlmOracleActivityLatency is the time for the activity gathering to complete + TlmOracleActivityLatency = telemetry.NewHistogram("oracle", "activity_latency", nil, "Histogram of activity query latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000}) + // TlmOracleActivitySamplesCount is the number of activity samples collected + TlmOracleActivitySamplesCount = telemetry.NewCounter("oracle", "activity_samples_count", nil, "Number of activity samples collected") + // TlmOracleStatementMetricsLatency is the time for the statement metrics gathering to complete + TlmOracleStatementMetricsLatency = telemetry.NewHistogram("oracle", "statement_metrics", nil, "Histogram of statement metrics latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000}) + // TlmOracleStatementMetricsErrorCount is the number of statement plan errors + TlmOracleStatementMetricsErrorCount = telemetry.NewCounter("oracle", "statement_plan_errors", nil, "Number of statement plan errors") +) diff --git a/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml new file mode 100644 index 0000000000000..194a7a58b5d88 --- /dev/null +++ b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +other: + - | + Adds Agent telemetry for Oracle collector. From b3e1d6f1d90fd3d6de1667c3e5246ddf1003d3b0 Mon Sep 17 00:00:00 2001 From: Stanley Liu Date: Fri, 6 Sep 2024 16:28:28 -0400 Subject: [PATCH 072/128] Document infra attributes processor (#29088) Co-authored-by: Yang Song --- .../infraattributesprocessor/README.md | 196 ++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md new file mode 100644 index 0000000000000..3c2859b510b64 --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md @@ -0,0 +1,196 @@ +# Infra Attributes Processor + +The infra attributes processor extracts [Kubernetes tags](https://docs.datadoghq.com/containers/kubernetes/tag/?tab=datadogoperator#out-of-the-box-tags) based on labels or annotations and assigns these tags as resource attributes on traces, metrics, and logs. + +When telemetry is exported from the otel-agent, these infra attributes will be converted into Datadog tags and used as metadata in [Container Monitoring](https://docs.datadoghq.com/containers/). + +## Configuration + +The infra attributes processor will be added automatically by the [converter component](../../../../converter/README.md). 
If you opted out of the converter, or you want to change the defaults, you are able to configure the processor as so: +``` +processors: + infraattributes: + cardinality: 0 +``` + +The infra attributes processor also needs to be included in the pipelines in order to take effect: +``` +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes] + exporters: [datadog/connector, datadog] + metrics: + receivers: [otlp, datadog/connector] + processors: [infraattributes] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes] + exporters: [datadog] +``` + +### Cardinality +The cardinality option sets the [TagCardinality](../../../../../../comp/core/tagger/README.md#tagcardinality) in the Datadog Agent tagger component. Possible values for this option include: +* `cardinality: 0` - **LowCardinality**: in the host count order of magnitude *(default)* +* `cardinality: 1` - **OrchestratorCardinality**: tags that change value for each pod or task +* `cardinality: 2` - **HighCardinality**: typically tags that change value for each web request, user agent, container, etc. + +## Expected Attributes + +The infra attributes processor [looks up the following resource attributes](https://github.com/DataDog/datadog-agent/blob/7d51e9e0dc9fb52aab468b372a5724eece97538c/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go#L42-L77) in order to extract Kubernetes Tags. These resource attributes can be set in your SDK or in your otel-agent collector configuration: + +| *[Entity](../../../../../../comp/core/tagger/README.md#entity-ids)* | *Resource Attributes* | +|----------------------------------------------------------------------|---------------------------------------------| +| workloadmeta.KindContainer | `container.id` | +| workloadmeta.KindContainerImageMetadata | `container.image.id` | +| workloadmeta.KindECSTask | `aws.ecs.task.arn` | +| workloadmeta.KindKubernetesDeployment | `k8s.deployment.name`, `k8s.namespace.name` | +| workloadmeta.KindKubernetesMetadata | `k8s.namespace.name`, `k8s.node.name` | +| workloadmeta.KindKubernetesPod | `k8s.pod.uid` | +| workloadmeta.KindProcess | `process.pid` | + +### SDK Configuration + +The expected resource attributes can be set by using the `OTEL_RESOURCE_ATTRIBUTES` environment variable. For example, this can be set in your Kubernetes deployment yaml: +``` +env: + ... + - name: OTEL_SERVICE_NAME + value: {{ include "calendar.fullname" . }} + - name: OTEL_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: OTEL_K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: OTEL_K8S_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: OTEL_K8S_POD_ID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.uid + - name: OTEL_RESOURCE_ATTRIBUTES + value: >- + service.name=$(OTEL_SERVICE_NAME), + k8s.namespace.name=$(OTEL_K8S_NAMESPACE), + k8s.node.name=$(OTEL_K8S_NODE_NAME), + k8s.pod.name=$(OTEL_K8S_POD_NAME), + k8s.pod.uid=$(OTEL_K8S_POD_ID), + k8s.container.name={{ .Chart.Name }}, + host.name=$(OTEL_K8S_NODE_NAME), + deployment.environment=$(OTEL_K8S_NAMESPACE) +``` + +If you are using OTel SDK auto-instrumentation, `container.id` and `process.pid` will be automatically set by your SDK. 
+ +### Collector Configuration + +The expected resource attributes can be set by configuring the [Kubernetes attributes processor and resource detection processor](https://docs.datadoghq.com/opentelemetry/collector_exporter/hostname_tagging/?tab=kubernetesdaemonset). This is demonstrated in the [k8s-values.yaml](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/k8s-values.yaml) example: +``` +mode: daemonset +presets: + kubernetesAttributes: + enabled: true +extraEnvs: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: OTEL_RESOURCE_ATTRIBUTES + value: "k8s.pod.ip=$(POD_IP)" +config: + processors: + k8sattributes: + passthrough: false + auth_type: "serviceAccount" + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.node.name + - k8s.namespace.name + - k8s.pod.start_time + - k8s.replicaset.name + - k8s.replicaset.uid + - k8s.daemonset.name + - k8s.daemonset.uid + - k8s.job.name + - k8s.job.uid + - k8s.cronjob.name + - k8s.statefulset.name + - k8s.statefulset.uid + - container.image.name + - container.image.tag + - container.id + - k8s.container.name + - container.image.name + - container.image.tag + - container.id + labels: + - tag_name: kube_app_name + key: app.kubernetes.io/name + from: pod + - tag_name: kube_app_instance + key: app.kubernetes.io/instance + from: pod + - tag_name: kube_app_version + key: app.kubernetes.io/version + from: pod + - tag_name: kube_app_component + key: app.kubernetes.io/component + from: pod + - tag_name: kube_app_part_of + key: app.kubernetes.io/part-of + from: pod + - tag_name: kube_app_managed_by + key: app.kubernetes.io/managed-by + from: pod + resourcedetection: + detectors: [env, eks, ec2, system] + timeout: 2s + override: false + batch: + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s + exporters: + datadog: + api: + site: ${env:DD_SITE} + key: ${env:DD_API_KEY} + traces: + trace_buffer: 500 + service: + pipelines: + metrics: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] + traces: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] +``` + +## List of Kubernetes Tags + +For the full list of Kubernetes Tags added by the infra attributes processor, see [comp/core/tagger/tags/tags.go](../../../../../../comp/core/tagger/tags/tags.go). 
From 317131906ca20bc5a81a479299bad789719a960f Mon Sep 17 00:00:00 2001 From: Hasan Mahmood <6599778+hmahmood@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:41:10 -0500 Subject: [PATCH 073/128] [NPM] Fix eBPF-less connection timestamp (#29064) Co-authored-by: Bryce Kahle --- pkg/network/tracer/connection/ebpfless_tracer.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go index b2cd928ceddfb..2c467a114aa4b 100644 --- a/pkg/network/tracer/connection/ebpfless_tracer.go +++ b/pkg/network/tracer/connection/ebpfless_tracer.go @@ -20,6 +20,7 @@ import ( "github.com/vishvananda/netns" "golang.org/x/sys/unix" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/filter" @@ -213,7 +214,11 @@ func (t *ebpfLessTracer) processConnection( } if conn.Type == network.UDP || conn.Monotonic.TCPEstablished > 0 { - conn.LastUpdateEpoch = uint64(time.Now().UnixNano()) + var ts int64 + if ts, err = ddebpf.NowNanoseconds(); err != nil { + return fmt.Errorf("error getting last updated timestamp for connection: %w", err) + } + conn.LastUpdateEpoch = uint64(ts) t.conns[key] = conn } From ce77af65a101e1804f79cee4f1db82eed85cb05e Mon Sep 17 00:00:00 2001 From: Adam Karpowich Date: Fri, 6 Sep 2024 16:50:21 -0400 Subject: [PATCH 074/128] [NPM] Use IMDSv2 for vpc_id (network_id) lookups (#29027) --- cmd/process-agent/subcommands/check/check.go | 8 ++- cmd/system-probe/modules/network_tracer.go | 11 +++ pkg/network/tracer/tracer.go | 15 ++++ pkg/network/tracer/tracer_unsupported.go | 5 ++ pkg/network/tracer/tracer_windows.go | 5 ++ pkg/process/checks/checks.go | 2 + pkg/process/checks/container.go | 17 +++-- pkg/process/checks/net.go | 16 ++++- pkg/process/checks/process.go | 14 +++- pkg/process/net/common.go | 31 +++++++- pkg/process/net/common_linux.go | 1 + pkg/process/net/common_unsupported.go | 5 ++ pkg/process/net/common_windows.go | 1 + pkg/process/net/mocks/sys_probe_util.go | 28 ++++++++ pkg/process/net/shared.go | 1 + pkg/util/cloudproviders/network.go | 2 +- pkg/util/ec2/ec2_test.go | 16 ++--- pkg/util/ec2/imds_helpers.go | 2 +- pkg/util/ec2/network.go | 8 +-- pkg/util/ec2/network_test.go | 74 +++++++++++++------- 20 files changed, 210 insertions(+), 52 deletions(-) diff --git a/cmd/process-agent/subcommands/check/check.go b/cmd/process-agent/subcommands/check/check.go index d91447f346d19..be338e8df2252 100644 --- a/cmd/process-agent/subcommands/check/check.go +++ b/cmd/process-agent/subcommands/check/check.go @@ -182,10 +182,12 @@ func RunCheckCmd(deps Dependencies) error { names = append(names, ch.Name()) _, processModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.ProcessModule] + _, networkTracerModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.NetworkTracerModule] cfg := &checks.SysProbeConfig{ - MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage, - SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress, - ProcessModuleEnabled: processModuleEnabled, + MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage, + SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress, + ProcessModuleEnabled: processModuleEnabled, + NetworkTracerModuleEnabled: networkTracerModuleEnabled, } if !matchingCheck(deps.CliParams.checkName, ch) { diff --git 
a/cmd/system-probe/modules/network_tracer.go b/cmd/system-probe/modules/network_tracer.go index 4852575b36a75..44f2af55d9c78 100644 --- a/cmd/system-probe/modules/network_tracer.go +++ b/cmd/system-probe/modules/network_tracer.go @@ -12,6 +12,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "os" "runtime" @@ -108,6 +109,16 @@ func (nt *networkTracer) Register(httpMux *module.Router) error { logRequests(id, count, len(cs.Conns), start) })) + httpMux.HandleFunc("/network_id", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) { + id, err := nt.tracer.GetNetworkID(req.Context()) + if err != nil { + log.Errorf("unable to retrieve network_id: %s", err) + w.WriteHeader(500) + return + } + io.WriteString(w, id) + })) + httpMux.HandleFunc("/register", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) { id := getClientID(req) err := nt.tracer.RegisterClient(id) diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index fded4ce4d70c0..5017167091fca 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -40,6 +40,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/util" timeresolver "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -850,3 +851,17 @@ func newUSMMonitor(c *config.Config, tracer connection.Tracer) *usm.Monitor { return monitor } + +// GetNetworkID retrieves the vpc_id (network_id) from IMDS +func (t *Tracer) GetNetworkID(context context.Context) (string, error) { + id := "" + err := kernel.WithRootNS(kernel.ProcFSRoot(), func() error { + var err error + id, err = ec2.GetNetworkID(context) + return err + }) + if err != nil { + return "", err + } + return id, nil +} diff --git a/pkg/network/tracer/tracer_unsupported.go b/pkg/network/tracer/tracer_unsupported.go index bdb6abdf3dbf5..f3ef15179c0b7 100644 --- a/pkg/network/tracer/tracer_unsupported.go +++ b/pkg/network/tracer/tracer_unsupported.go @@ -34,6 +34,11 @@ func (t *Tracer) GetActiveConnections(_ string) (*network.Connections, error) { return nil, ebpf.ErrNotImplemented } +// GetNetworkID is not implemented on this OS for Tracer +func (t *Tracer) GetNetworkID(_ context.Context) (string, error) { + return "", ebpf.ErrNotImplemented +} + // RegisterClient registers the client func (t *Tracer) RegisterClient(_ string) error { return ebpf.ErrNotImplemented diff --git a/pkg/network/tracer/tracer_windows.go b/pkg/network/tracer/tracer_windows.go index a4677a19c501a..fba6ea78a95b0 100644 --- a/pkg/network/tracer/tracer_windows.go +++ b/pkg/network/tracer/tracer_windows.go @@ -309,6 +309,11 @@ func (t *Tracer) DebugDumpProcessCache(_ context.Context) (interface{}, error) { return nil, ebpf.ErrNotImplemented } +// GetNetworkID is not implemented on this OS for Tracer +func (t *Tracer) GetNetworkID(_ context.Context) (string, error) { + return "", ebpf.ErrNotImplemented +} + func newUSMMonitor(c *config.Config, dh driver.Handle) usm.Monitor { if !c.EnableHTTPMonitoring && !c.EnableNativeTLSMonitoring { return nil diff --git a/pkg/process/checks/checks.go b/pkg/process/checks/checks.go index 139abb92720dd..b7aabbd8c794e 100644 --- a/pkg/process/checks/checks.go +++ b/pkg/process/checks/checks.go @@ -35,6 +35,8 @@ type SysProbeConfig struct { 
SystemProbeAddress string // System probe process module on/off configuration ProcessModuleEnabled bool + // System probe network_tracer module on/off configuration + NetworkTracerModuleEnabled bool } // Check is an interface for Agent checks that collect data. Each check returns diff --git a/pkg/process/checks/container.go b/pkg/process/checks/container.go index de3e40fd00b43..1e4187d46391c 100644 --- a/pkg/process/checks/container.go +++ b/pkg/process/checks/container.go @@ -6,7 +6,6 @@ package checks import ( - "context" "fmt" "math" "sync" @@ -16,9 +15,9 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/process/statsd" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" - "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,11 +52,21 @@ type ContainerCheck struct { } // Init initializes a ContainerCheck instance. -func (c *ContainerCheck) Init(_ *SysProbeConfig, info *HostInfo, _ bool) error { +func (c *ContainerCheck) Init(syscfg *SysProbeConfig, info *HostInfo, _ bool) error { c.containerProvider = proccontainers.GetSharedContainerProvider(c.wmeta) c.hostInfo = info - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + var tu *net.RemoteSysProbeUtil + var err error + if syscfg.NetworkTracerModuleEnabled { + // Calling the remote tracer will cause it to initialize and check connectivity + tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress) + if err != nil { + log.Warnf("could not initiate connection with system probe: %s", err) + } + } + + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 26e01d0677061..5396fdd26c10f 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -107,7 +107,7 @@ func (c *ConnectionsCheck) Init(syscfg *SysProbeConfig, hostInfo *HostInfo, _ bo } } - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } @@ -503,3 +503,17 @@ func convertAndEnrichWithServiceCtx(tags []string, tagOffsets []uint32, serviceC return tagsStr } + +// fetches network_id from the current netNS or from the system probe if necessary, where the root netNS is used +func retryGetNetworkID(sysProbeUtil *net.RemoteSysProbeUtil) (string, error) { + networkID, err := cloudproviders.GetNetworkID(context.TODO()) + if err != nil && sysProbeUtil != nil { + log.Infof("no network ID detected. 
retrying via system-probe: %s", err) + networkID, err = sysProbeUtil.GetNetworkID() + if err != nil { + log.Infof("failed to get network ID from system-probe: %s", err) + return "", err + } + } + return networkID, err +} diff --git a/pkg/process/checks/process.go b/pkg/process/checks/process.go index f35e71704d1cb..26685da9e15aa 100644 --- a/pkg/process/checks/process.go +++ b/pkg/process/checks/process.go @@ -6,7 +6,6 @@ package checks import ( - "context" "errors" "fmt" "math" @@ -28,7 +27,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/process/util" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" - "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/subscriptions" @@ -137,7 +135,17 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.notInitializedLogLimit = log.NewLogLimit(1, time.Minute*10) - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + var tu *net.RemoteSysProbeUtil + var err error + if syscfg.NetworkTracerModuleEnabled { + // Calling the remote tracer will cause it to initialize and check connectivity + tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress) + if err != nil { + log.Warnf("could not initiate connection with system probe: %s", err) + } + } + + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go index a9b7a64430143..640c3e82dadd2 100644 --- a/pkg/process/net/common.go +++ b/pkg/process/net/common.go @@ -44,6 +44,7 @@ type Conn interface { const ( contentTypeProtobuf = "application/protobuf" + contentTypeJSON = "application/json" ) var ( @@ -166,6 +167,32 @@ func (r *RemoteSysProbeUtil) GetConnections(clientID string) (*model.Connections return conns, nil } +// GetNetworkID fetches the network_id (vpc_id) from system-probe +func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) { + req, err := http.NewRequest("GET", networkIDURL, nil) + if err != nil { + return "", fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Accept", "text/plain") + resp, err := r.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("network_id request failed: url: %s, status code: %d", networkIDURL, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + return string(body), nil +} + // GetPing returns the results of a ping to a host func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, interval time.Duration, timeout time.Duration) ([]byte, error) { req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s?client_id=%s&count=%d&interval=%d&timeout=%d", pingURL, host, clientID, count, interval, timeout), nil) @@ -173,7 +200,7 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in return nil, err } - req.Header.Set("Accept", "application/json") + req.Header.Set("Accept", contentTypeJSON) resp, err := r.httpClient.Do(req) if err != nil { return nil, err @@ -208,7 +235,7 @@ func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port ui return nil, err } - 
req.Header.Set("Accept", "application/json") + req.Header.Set("Accept", contentTypeJSON) resp, err := r.tracerouteClient.Do(req) if err != nil { return nil, err diff --git a/pkg/process/net/common_linux.go b/pkg/process/net/common_linux.go index 2dc5c7db28c8d..7fee3ffdb1cb9 100644 --- a/pkg/process/net/common_linux.go +++ b/pkg/process/net/common_linux.go @@ -18,6 +18,7 @@ const ( pingURL = "http://unix/" + string(sysconfig.PingModule) + "/ping/" tracerouteURL = "http://unix/" + string(sysconfig.TracerouteModule) + "/traceroute/" connectionsURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/connections" + networkIDURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/network_id" procStatsURL = "http://unix/" + string(sysconfig.ProcessModule) + "/stats" registerURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/register" statsURL = "http://unix/debug/stats" diff --git a/pkg/process/net/common_unsupported.go b/pkg/process/net/common_unsupported.go index 03a481a2de400..ebdea5968e5bb 100644 --- a/pkg/process/net/common_unsupported.go +++ b/pkg/process/net/common_unsupported.go @@ -40,6 +40,11 @@ func (r *RemoteSysProbeUtil) GetConnections(_ string) (*model.Connections, error return nil, ErrNotImplemented } +// GetNetworkID is not supported +func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) { + return "", ErrNotImplemented +} + // GetStats is not supported func (r *RemoteSysProbeUtil) GetStats() (map[string]interface{}, error) { return nil, ErrNotImplemented diff --git a/pkg/process/net/common_windows.go b/pkg/process/net/common_windows.go index 4ad0d218e65f5..83d8440825e4a 100644 --- a/pkg/process/net/common_windows.go +++ b/pkg/process/net/common_windows.go @@ -15,6 +15,7 @@ import ( const ( connectionsURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/connections" + networkIDURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/network_id" registerURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/register" languageDetectionURL = "http://localhost:3333/" + string(sysconfig.LanguageDetectionModule) + "/detect" statsURL = "http://localhost:3333/debug/stats" diff --git a/pkg/process/net/mocks/sys_probe_util.go b/pkg/process/net/mocks/sys_probe_util.go index 3bf0b2c1d7270..0d0af5300fa4f 100644 --- a/pkg/process/net/mocks/sys_probe_util.go +++ b/pkg/process/net/mocks/sys_probe_util.go @@ -43,6 +43,34 @@ func (_m *SysProbeUtil) GetConnections(clientID string) (*process.Connections, e return r0, r1 } +// GetNetworkID provides a mock function with given fields: +func (_m *SysProbeUtil) GetNetworkID() (string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNetworkID") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetProcStats provides a mock function with given fields: pids func (_m *SysProbeUtil) GetProcStats(pids []int32) (*process.ProcStatsWithPermByPID, error) { ret := _m.Called(pids) diff --git a/pkg/process/net/shared.go b/pkg/process/net/shared.go index 72a6e418865c6..a0a7aa18ae327 100644 --- a/pkg/process/net/shared.go +++ b/pkg/process/net/shared.go @@ -13,4 +13,5 @@ type SysProbeUtil interface { GetStats() (map[string]interface{}, error) 
GetProcStats(pids []int32) (*model.ProcStatsWithPermByPID, error) Register(clientID string) error + GetNetworkID() (string, error) } diff --git a/pkg/util/cloudproviders/network.go b/pkg/util/cloudproviders/network.go index 12c7496579c3f..c183ea96ce06f 100644 --- a/pkg/util/cloudproviders/network.go +++ b/pkg/util/cloudproviders/network.go @@ -30,7 +30,7 @@ func GetNetworkID(ctx context.Context) (string, error) { return cache.Get[string]( networkIDCacheKey, func() (string, error) { - // the the id from configuration + // the id from configuration if networkID := config.Datadog().GetString("network.id"); networkID != "" { log.Debugf("GetNetworkID: using configured network ID: %s", networkID) return networkID, nil diff --git a/pkg/util/ec2/ec2_test.go b/pkg/util/ec2/ec2_test.go index d92242dada46f..cbb660df50c8a 100644 --- a/pkg/util/ec2/ec2_test.go +++ b/pkg/util/ec2/ec2_test.go @@ -30,6 +30,8 @@ var ( initialTokenURL = tokenURL ) +const testIMDSToken = "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" + func resetPackageVars() { config.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout) metadataURL = initialMetadataURL @@ -301,12 +303,11 @@ func TestExtractClusterName(t *testing.T) { func TestGetToken(t *testing.T) { ctx := context.Background() - originalToken := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") h := r.Header.Get("X-aws-ec2-metadata-token-ttl-seconds") if h != "" && r.Method == http.MethodPut { - io.WriteString(w, originalToken) + io.WriteString(w, testIMDSToken) } else { w.WriteHeader(http.StatusNotFound) } @@ -319,7 +320,7 @@ func TestGetToken(t *testing.T) { token, err := token.Get(ctx) require.NoError(t, err) - assert.Equal(t, originalToken, token) + assert.Equal(t, testIMDSToken, token) } func TestMetedataRequestWithToken(t *testing.T) { @@ -331,7 +332,6 @@ func TestMetedataRequestWithToken(t *testing.T) { ctx := context.Background() ipv4 := "198.51.100.1" - tok := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") @@ -345,11 +345,11 @@ func TestMetedataRequestWithToken(t *testing.T) { r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) seq++ requestForToken = r - io.WriteString(w, tok) + io.WriteString(w, testIMDSToken) case http.MethodGet: // Should be a metadata request t := r.Header.Get("X-aws-ec2-metadata-token") - if t != tok { + if t != testIMDSToken { r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) seq++ requestWithoutToken = r @@ -386,7 +386,7 @@ func TestMetedataRequestWithToken(t *testing.T) { assert.Equal(t, fmt.Sprint(config.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) assert.Equal(t, http.MethodPut, requestForToken.Method) assert.Equal(t, "/", requestForToken.RequestURI) - assert.Equal(t, tok, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) + assert.Equal(t, testIMDSToken, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) assert.Equal(t, "/public-ipv4", requestWithToken.RequestURI) assert.Equal(t, http.MethodGet, requestWithToken.Method) @@ -515,7 +515,7 @@ func TestMetadataSourceIMDS(t *testing.T) { w.Header().Set("Content-Type", "text/plain") switch r.Method { case http.MethodPut: // token request - io.WriteString(w, 
"AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==") + io.WriteString(w, testIMDSToken) case http.MethodGet: // metadata request switch r.RequestURI { case "/hostname": diff --git a/pkg/util/ec2/imds_helpers.go b/pkg/util/ec2/imds_helpers.go index 510fad39f43c4..afc2ef22fffbd 100644 --- a/pkg/util/ec2/imds_helpers.go +++ b/pkg/util/ec2/imds_helpers.go @@ -77,7 +77,7 @@ func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, e tokenValue, err := token.Get(ctx) if err != nil { if forceIMDSv2 { - return "", fmt.Errorf("Could not fetch token from IMDSv2") + return "", fmt.Errorf("could not fetch token from IMDSv2") } log.Warnf("ec2_prefer_imdsv2 is set to true in the configuration but the agent was unable to proceed: %s", err) } else { diff --git a/pkg/util/ec2/network.go b/pkg/util/ec2/network.go index 5fafa6bed62d7..a7fa4730513a7 100644 --- a/pkg/util/ec2/network.go +++ b/pkg/util/ec2/network.go @@ -30,9 +30,9 @@ func GetPublicIPv4(ctx context.Context) (string, error) { var networkIDFetcher = cachedfetch.Fetcher{ Name: "VPC IDs", Attempt: func(ctx context.Context) (interface{}, error) { - resp, err := getMetadataItem(ctx, imdsNetworkMacs, false) + resp, err := getMetadataItem(ctx, imdsNetworkMacs, true) if err != nil { - return "", err + return "", fmt.Errorf("EC2: GetNetworkID failed to get mac addresses: %w", err) } macs := strings.Split(strings.TrimSpace(resp), "\n") @@ -43,9 +43,9 @@ var networkIDFetcher = cachedfetch.Fetcher{ continue } mac = strings.TrimSuffix(mac, "/") - id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), false) + id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), true) if err != nil { - return "", err + return "", fmt.Errorf("EC2: GetNetworkID failed to get vpc id for mac %s: %w", mac, err) } vpcIDs.Add(id) } diff --git a/pkg/util/ec2/network_test.go b/pkg/util/ec2/network_test.go index 7fa773b41b888..1e4ca0bc36b42 100644 --- a/pkg/util/ec2/network_test.go +++ b/pkg/util/ec2/network_test.go @@ -23,11 +23,16 @@ func TestGetPublicIPv4(t *testing.T) { ip := "10.0.0.2" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/public-ipv4": - io.WriteString(w, ip) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/public-ipv4": + io.WriteString(w, ip) + default: + w.WriteHeader(http.StatusNotFound) + } } })) @@ -47,18 +52,24 @@ func TestGetNetworkID(t *testing.T) { vpc := "vpc-12345" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/network/interfaces/macs": - io.WriteString(w, mac+"/") - case "/network/interfaces/macs/00:00:00:00:00/vpc-id": - io.WriteString(w, vpc) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/network/interfaces/macs": + io.WriteString(w, mac+"/") + case "/network/interfaces/macs/00:00:00:00:00/vpc-id": + io.WriteString(w, vpc) + default: + w.WriteHeader(http.StatusNotFound) + } } })) defer ts.Close() metadataURL = ts.URL + tokenURL = ts.URL config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) 
defer resetPackageVars() @@ -69,18 +80,25 @@ func TestGetNetworkID(t *testing.T) { func TestGetInstanceIDNoMac(t *testing.T) { ctx := context.Background() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - io.WriteString(w, "") + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + io.WriteString(w, "") + } })) defer ts.Close() metadataURL = ts.URL + tokenURL = ts.URL config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := GetNetworkID(ctx) require.Error(t, err) - assert.Contains(t, err.Error(), "no mac addresses returned") + assert.Contains(t, err.Error(), "EC2: GetNetworkID no mac addresses returned") } func TestGetInstanceIDMultipleVPC(t *testing.T) { @@ -91,21 +109,27 @@ func TestGetInstanceIDMultipleVPC(t *testing.T) { vpc2 := "vpc-6789" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/network/interfaces/macs": - io.WriteString(w, mac+"/\n") - io.WriteString(w, mac2+"/\n") - case "/network/interfaces/macs/00:00:00:00:00/vpc-id": - io.WriteString(w, vpc) - case "/network/interfaces/macs/00:00:00:00:01/vpc-id": - io.WriteString(w, vpc2) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/network/interfaces/macs": + io.WriteString(w, mac+"/\n") + io.WriteString(w, mac2+"/\n") + case "/network/interfaces/macs/00:00:00:00:00/vpc-id": + io.WriteString(w, vpc) + case "/network/interfaces/macs/00:00:00:00:01/vpc-id": + io.WriteString(w, vpc2) + default: + w.WriteHeader(http.StatusNotFound) + } } })) defer ts.Close() metadataURL = ts.URL + tokenURL = ts.URL config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() From d003e99dfa25615b9690972120b2907d7eaeab73 Mon Sep 17 00:00:00 2001 From: grantseltzer Date: Fri, 6 Sep 2024 17:24:58 -0400 Subject: [PATCH 075/128] Add dynamic instrumentation system-probe module (#28639) Co-authored-by: brycekahle Co-authored-by: --- .github/CODEOWNERS | 1 + .../modules/dynamic_instrumentation.go | 17 +- pkg/config/setup/system_probe.go | 4 + .../codegen/c/dynamicinstrumentation.c | 99 + pkg/dynamicinstrumentation/codegen/c/types.h | 14 + pkg/dynamicinstrumentation/codegen/codegen.go | 232 +++ pkg/dynamicinstrumentation/codegen/compile.go | 11 + .../codegen/output_offsets.go | 138 ++ .../codegen/templates.go | 210 +++ pkg/dynamicinstrumentation/di.go | 159 ++ .../diagnostics/diagnostics.go | 82 + .../diconfig/binary_inspection.go | 268 +++ .../diconfig/config_manager.go | 295 +++ pkg/dynamicinstrumentation/diconfig/dwarf.go | 642 +++++++ .../diconfig/file_config_manager.go | 230 +++ .../ditypes/analysis.go | 105 ++ pkg/dynamicinstrumentation/ditypes/config.go | 337 ++++ .../ditypes/config_test.go | 44 + .../ditypes/diagnostics.go | 52 + pkg/dynamicinstrumentation/ditypes/ebpf.go | 17 + .../ditypes/ebpf_linux.go | 13 + .../ditypes/ringbuffer.go | 43 + .../ditypes/snapshot.go | 118 ++ .../ditypes/snapshot_test.go | 50 + .../ditypes/testdata/snapshot-00.json | 402 ++++ .../ditypes/testdata/snapshot-01.json | 1660 +++++++++++++++++ 
.../ditypes/testdata/snapshot-02.json | 1645 ++++++++++++++++ pkg/dynamicinstrumentation/ebpf/ebpf.go | 174 ++ .../eventparser/event_parser.go | 268 +++ .../eventparser/event_parser_test.go | 298 +++ .../eventparser/param_stack.go | 45 + .../{ => module}/config.go | 4 +- .../{ => module}/doc.go | 9 +- pkg/dynamicinstrumentation/module/module.go | 73 + .../module_stub.go} | 30 +- .../proctracker/proctracker.go | 251 +++ .../proctracker/types.go | 43 + .../ratelimiter/ratelimit.go | 92 + .../ratelimiter/ratelimit_test.go | 48 + pkg/dynamicinstrumentation/ringbufconsumer.go | 64 + .../uploader/di_log_converter.go | 159 ++ .../uploader/offline.go | 83 + .../uploader/stack_trace.go | 151 ++ .../uploader/uploader.go | 221 +++ .../util/file_watcher.go | 61 + .../util/file_watcher_test.go | 110 ++ pkg/ebpf/bytecode/runtime/.gitignore | 1 + pkg/ebpf/bytecode/runtime/asset.go | 123 +- pkg/ebpf/bytecode/runtime/protected_file.go | 11 +- .../runtime/runtime_compilation_helpers.go | 2 +- pkg/ebpf/cgo/genpost.go | 2 +- pkg/ebpf/compiler/compiler.go | 4 - tasks/system_probe.py | 3 + 53 files changed, 9162 insertions(+), 56 deletions(-) create mode 100644 pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c create mode 100644 pkg/dynamicinstrumentation/codegen/c/types.h create mode 100644 pkg/dynamicinstrumentation/codegen/codegen.go create mode 100644 pkg/dynamicinstrumentation/codegen/compile.go create mode 100644 pkg/dynamicinstrumentation/codegen/output_offsets.go create mode 100644 pkg/dynamicinstrumentation/codegen/templates.go create mode 100644 pkg/dynamicinstrumentation/di.go create mode 100644 pkg/dynamicinstrumentation/diagnostics/diagnostics.go create mode 100644 pkg/dynamicinstrumentation/diconfig/binary_inspection.go create mode 100644 pkg/dynamicinstrumentation/diconfig/config_manager.go create mode 100644 pkg/dynamicinstrumentation/diconfig/dwarf.go create mode 100644 pkg/dynamicinstrumentation/diconfig/file_config_manager.go create mode 100644 pkg/dynamicinstrumentation/ditypes/analysis.go create mode 100644 pkg/dynamicinstrumentation/ditypes/config.go create mode 100644 pkg/dynamicinstrumentation/ditypes/config_test.go create mode 100644 pkg/dynamicinstrumentation/ditypes/diagnostics.go create mode 100644 pkg/dynamicinstrumentation/ditypes/ebpf.go create mode 100644 pkg/dynamicinstrumentation/ditypes/ebpf_linux.go create mode 100644 pkg/dynamicinstrumentation/ditypes/ringbuffer.go create mode 100644 pkg/dynamicinstrumentation/ditypes/snapshot.go create mode 100644 pkg/dynamicinstrumentation/ditypes/snapshot_test.go create mode 100644 pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json create mode 100644 pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json create mode 100644 pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json create mode 100644 pkg/dynamicinstrumentation/ebpf/ebpf.go create mode 100644 pkg/dynamicinstrumentation/eventparser/event_parser.go create mode 100644 pkg/dynamicinstrumentation/eventparser/event_parser_test.go create mode 100644 pkg/dynamicinstrumentation/eventparser/param_stack.go rename pkg/dynamicinstrumentation/{ => module}/config.go (96%) rename pkg/dynamicinstrumentation/{ => module}/doc.go (51%) create mode 100644 pkg/dynamicinstrumentation/module/module.go rename pkg/dynamicinstrumentation/{module_linux.go => module/module_stub.go} (55%) create mode 100644 pkg/dynamicinstrumentation/proctracker/proctracker.go create mode 100644 pkg/dynamicinstrumentation/proctracker/types.go create mode 100644 
pkg/dynamicinstrumentation/ratelimiter/ratelimit.go create mode 100644 pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go create mode 100644 pkg/dynamicinstrumentation/ringbufconsumer.go create mode 100644 pkg/dynamicinstrumentation/uploader/di_log_converter.go create mode 100644 pkg/dynamicinstrumentation/uploader/offline.go create mode 100644 pkg/dynamicinstrumentation/uploader/stack_trace.go create mode 100644 pkg/dynamicinstrumentation/uploader/uploader.go create mode 100644 pkg/dynamicinstrumentation/util/file_watcher.go create mode 100644 pkg/dynamicinstrumentation/util/file_watcher_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 15af633890318..62262edfd9967 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -205,6 +205,7 @@ /cmd/system-probe/modules/service_discover* @DataDog/apm-onboarding @DataDog/universal-service-monitoring /cmd/system-probe/modules/language_detection* @DataDog/processes @DataDog/universal-service-monitoring /cmd/system-probe/runtime/ @DataDog/agent-security +/cmd/system-probe/modules/dynamic_instrumentation* @DataDog/debugger /cmd/system-probe/windows/ @DataDog/windows-kernel-integrations /cmd/system-probe/windows_resources/ @DataDog/windows-kernel-integrations /cmd/system-probe/main_windows*.go @DataDog/windows-kernel-integrations diff --git a/cmd/system-probe/modules/dynamic_instrumentation.go b/cmd/system-probe/modules/dynamic_instrumentation.go index 7ff8d7d48ba6d..bd4272b8c8295 100644 --- a/cmd/system-probe/modules/dynamic_instrumentation.go +++ b/cmd/system-probe/modules/dynamic_instrumentation.go @@ -14,23 +14,26 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation" + dimod "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/module" "github.com/DataDog/datadog-agent/pkg/ebpf" ) -// DynamicInstrumentation is the dynamic instrumentation module factory +// DynamicInstrumentation is a system probe module which allows you to add instrumentation into +// running Go services without restarts. 
var DynamicInstrumentation = module.Factory{ Name: config.DynamicInstrumentationModule, ConfigNamespaces: []string{}, Fn: func(agentConfiguration *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { - config, err := dynamicinstrumentation.NewConfig(agentConfiguration) + config, err := dimod.NewConfig(agentConfiguration) if err != nil { return nil, fmt.Errorf("invalid dynamic instrumentation module configuration: %w", err) } - - m, err := dynamicinstrumentation.NewModule(config) - if errors.Is(err, ebpf.ErrNotImplemented) { - return nil, module.ErrNotEnabled + m, err := dimod.NewModule(config) + if err != nil { + if errors.Is(err, ebpf.ErrNotImplemented) { + return nil, module.ErrNotEnabled + } + return nil, err } return m, nil diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 64a4fc53e273e..7ebde9fda3f96 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -167,6 +167,10 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { // User Tracer cfg.BindEnvAndSetDefault(join(diNS, "enabled"), false, "DD_DYNAMIC_INSTRUMENTATION_ENABLED") + cfg.BindEnvAndSetDefault(join(diNS, "offline_mode"), false, "DD_DYNAMIC_INSTRUMENTATION_OFFLINE_MODE") + cfg.BindEnvAndSetDefault(join(diNS, "probes_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_PROBES_FILE_PATH") + cfg.BindEnvAndSetDefault(join(diNS, "snapshot_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_SNAPSHOT_FILE_PATH") + cfg.BindEnvAndSetDefault(join(diNS, "diagnostics_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_DIAGNOSTICS_FILE_PATH") // network_tracer settings // we cannot use BindEnvAndSetDefault for network_config.enabled because we need to know if it was manually set. diff --git a/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c new file mode 100644 index 0000000000000..f3c17c3dd5ca3 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c @@ -0,0 +1,99 @@ +#include "bpf_helpers.h" +#include "bpf_tracing.h" +#include "kconfig.h" +#include +#include "types.h" + +#define MAX_STRING_SIZE {{ .InstrumentationInfo.InstrumentationOptions.StringMaxSize}} +#define PARAM_BUFFER_SIZE {{ .InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize}} +#define STACK_DEPTH_LIMIT 10 +#define MAX_SLICE_SIZE 1800 +#define MAX_SLICE_LENGTH 20 + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 24); +} events SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(char[PARAM_BUFFER_SIZE])); + __uint(max_entries, 1); +} zeroval SEC(".maps"); + +struct event { + struct base_event base; + char output[PARAM_BUFFER_SIZE]; +}; + +SEC("uprobe/{{.GetBPFFuncName}}") +int {{.GetBPFFuncName}}(struct pt_regs *ctx) +{ + bpf_printk("{{.GetBPFFuncName}} probe in {{.ServiceName}} has triggered"); + + // reserve space on ringbuffer + struct event *event; + event = bpf_ringbuf_reserve(&events, sizeof(struct event), 0); + if (!event) { + bpf_printk("No space available on ringbuffer, dropping event"); + return 0; + } + + char* zero_string; + __u32 key = 0; + zero_string = bpf_map_lookup_elem(&zeroval, &key); + if (!zero_string) { + bpf_printk("couldn't lookup zero value in zeroval array map, dropping event for {{.GetBPFFuncName}}"); + bpf_ringbuf_discard(event, 0); + return 0; + } + + bpf_probe_read(&event->base.probe_id, sizeof(event->base.probe_id), zero_string); + 
bpf_probe_read(&event->base.program_counters, sizeof(event->base.program_counters), zero_string); + bpf_probe_read(&event->output, sizeof(event->output), zero_string); + bpf_probe_read(&event->base.probe_id, {{ .ID | len }}, "{{.ID}}"); + + // Get tid and tgid + u64 pidtgid = bpf_get_current_pid_tgid(); + u32 tgid = pidtgid >> 32; + event->base.pid = tgid; + + u64 uidgid = bpf_get_current_uid_gid(); + u32 uid = uidgid >> 32; + event->base.uid = uid; + + // Collect stack trace + __u64 currentPC = ctx->pc; + bpf_probe_read(&event->base.program_counters[0], sizeof(__u64), &currentPC); + + __u64 bp = ctx->regs[29]; + bpf_probe_read(&bp, sizeof(__u64), (void*)bp); // dereference bp to get current stack frame + __u64 ret_addr = ctx->regs[30]; // when bpf prog enters, the return address hasn't yet been written to the stack + + int i; + for (i = 1; i < STACK_DEPTH_LIMIT; i++) + { + if (bp == 0) { + break; + } + bpf_probe_read(&event->base.program_counters[i], sizeof(__u64), &ret_addr); + bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(bp-8)); + bpf_probe_read(&bp, sizeof(__u64), (void*)bp); + } + + // Collect parameters + __u8 param_type; + __u16 param_size; + __u16 slice_length; + + int outputOffset = 0; + + {{ .InstrumentationInfo.BPFParametersSourceCode }} + + bpf_ringbuf_submit(event, 0); + + return 0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/pkg/dynamicinstrumentation/codegen/c/types.h b/pkg/dynamicinstrumentation/codegen/c/types.h new file mode 100644 index 0000000000000..f170b91fe7541 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/types.h @@ -0,0 +1,14 @@ +#ifndef DI_TYPES_H +#define DI_TYPES_H + +#include "ktypes.h" + +// NOTE: Be careful when adding fields, alignment should always be to 8 bytes +struct base_event { + char probe_id[304]; + __u32 pid; + __u32 uid; + __u64 program_counters[10]; +}__attribute__((aligned(8))); + +#endif diff --git a/pkg/dynamicinstrumentation/codegen/codegen.go b/pkg/dynamicinstrumentation/codegen/codegen.go new file mode 100644 index 0000000000000..6c3e7b44905cf --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/codegen.go @@ -0,0 +1,232 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package codegen is used to generate bpf program source code based on probe definitions +package codegen + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strings" + "text/template" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// GenerateBPFParamsCode generates the source code associated with the probe and data +// in its associated process info.
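The parameter-reading C above is produced by executing Go text/template templates against parameter metadata. A minimal, self-contained sketch of that mechanism follows; the `param` and `location` types and their field values are illustrative stand-ins, not the patch's ditypes.Parameter.

package main

import (
	"os"
	"text/template"
)

type location struct{ Register int }

type param struct {
	Name      string
	ID        string
	Kind      uint
	TotalSize int64
	Location  location
}

// Shape mirrors a register-value template: read TotalSize bytes out of a register
// into the event output buffer.
const regValueTemplate = `
// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}}
bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, &ctx->regs[{{.Location.Register}}]);
outputOffset += {{.TotalSize}};
`

func main() {
	tmpl := template.Must(template.New("register_template").Parse(regValueTemplate))
	// Render the C snippet for a hypothetical 8-byte argument held in register 0.
	_ = tmpl.Execute(os.Stdout, param{Name: "x", ID: "ABCDEF", Kind: 2, TotalSize: 8, Location: location{Register: 0}})
}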
+func GenerateBPFParamsCode(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + parameterBytes := []byte{} + out := bytes.NewBuffer(parameterBytes) + + if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters { + params := applyCaptureDepth(procInfo.TypeMap.Functions[probe.FuncName], probe.InstrumentationInfo.InstrumentationOptions.MaxReferenceDepth) + applyFieldCountLimit(params) + for i := range params { + flattenedParams := flattenParameters([]ditypes.Parameter{params[i]}) + + err := generateHeadersText(flattenedParams, out) + if err != nil { + return err + } + + err = generateParametersText(flattenedParams, out) + if err != nil { + return err + } + } + } else { + log.Info("Not capturing parameters") + } + + probe.InstrumentationInfo.BPFParametersSourceCode = out.String() + return nil +} + +func resolveHeaderTemplate(param *ditypes.Parameter) (*template.Template, error) { + switch param.Kind { + case uint(reflect.String): + if param.Location.InReg { + return template.New("string_reg_header_template").Parse(stringRegisterHeaderTemplateText) + } + return template.New("string_stack_header_template").Parse(stringStackHeaderTemplateText) + case uint(reflect.Slice): + if param.Location.InReg { + return template.New("slice_reg_header_template").Parse(sliceRegisterHeaderTemplateText) + } + return template.New("slice_stack_header_template").Parse(sliceStackHeaderTemplateText) + default: + return template.New("header_template").Parse(headerTemplateText) + } +} + +func generateHeadersText(params []ditypes.Parameter, out io.Writer) error { + for i := range params { + err := generateHeaderText(params[i], out) + if err != nil { + return err + } + } + return nil +} + +func generateHeaderText(param ditypes.Parameter, out io.Writer) error { + if reflect.Kind(param.Kind) == reflect.Slice { + return generateSliceHeader(&param, out) + } + + tmplt, err := resolveHeaderTemplate(&param) + if err != nil { + return err + } + err = tmplt.Execute(out, param) + if err != nil { + return err + } + return nil +} + +func generateParametersText(params []ditypes.Parameter, out io.Writer) error { + for i := range params { + err := generateParameterText(&params[i], out) + if err != nil { + return err + } + } + return nil +} + +func generateParameterText(param *ditypes.Parameter, out io.Writer) error { + + if param.Kind == uint(reflect.Array) || + param.Kind == uint(reflect.Struct) || + param.Kind == uint(reflect.Pointer) { + // - Arrays/structs don't have actual values, we just want to generate + // a header for them for the sake of event parsing. + // - Pointers do have actual values, but they're captured when the + // underlying value is also captured.
+ return nil + } + + template, err := resolveParameterTemplate(param) + if err != nil { + return err + } + param.Type = cleanupTypeName(param.Type) + err = template.Execute(out, param) + if err != nil { + return fmt.Errorf("could not execute template for generating read of parameter: %w", err) + } + + return nil +} + +func resolveParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + if param.Type == "main.triggerVerifierErrorForTesting" { + return template.New("trigger_verifier_error_template").Parse(forcedVerifierErrorTemplate) + } + notSupported := param.NotCaptureReason == ditypes.Unsupported + cutForFieldLimit := param.NotCaptureReason == ditypes.FieldLimitReached + + if notSupported { + return template.New("unsupported_type_template").Parse(unsupportedTypeTemplateText) + } else if cutForFieldLimit { + return template.New("cut_field_limit_template").Parse(cutForFieldLimitTemplateText) + } + + if param.Location.InReg { + return resolveRegisterParameterTemplate(param) + } + return resolveStackParameterTemplate(param) +} + +func resolveRegisterParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + needsDereference := param.Location.NeedsDereference + stringType := param.Kind == uint(reflect.String) + sliceType := param.Kind == uint(reflect.Slice) + + if needsDereference { + // Register Pointer + return template.New("pointer_register_template").Parse(pointerRegisterTemplateText) + } else if stringType { + // Register String + return template.New("string_register_template").Parse(stringRegisterTemplateText) + } else if sliceType { + // Register Slice + return template.New("slice_register_template").Parse(sliceRegisterTemplateText) + } else if !needsDereference { + // Register Normal Value + return template.New("register_template").Parse(normalValueRegisterTemplateText) + } + return nil, errors.New("no template created: invalid or unsupported type") +} + +func resolveStackParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + needsDereference := param.Location.NeedsDereference + stringType := param.Kind == uint(reflect.String) + sliceType := param.Kind == uint(reflect.Slice) + + if needsDereference { + // Stack Pointer + return template.New("pointer_stack_template").Parse(pointerStackTemplateText) + } else if stringType { + // Stack String + return template.New("string_stack_template").Parse(stringStackTemplateText) + } else if sliceType { + // Stack Slice + return template.New("slice_stack_template").Parse(sliceStackTemplateText) + } else if !needsDereference { + // Stack Normal Value + return template.New("stack_template").Parse(normalValueStackTemplateText) + } + return nil, errors.New("no template created: invalid or unsupported type") +} + +func cleanupTypeName(s string) string { + return strings.TrimPrefix(s, "*") +} + +func generateSliceHeader(slice *ditypes.Parameter, out io.Writer) error { + if slice == nil { + return errors.New("nil slice parameter when generating header code") + } + if len(slice.ParameterPieces) != 1 { + return errors.New("invalid slice parameter when generating header code") + } + + x := []byte{} + buf := bytes.NewBuffer(x) + err := generateHeaderText(slice.ParameterPieces[0], buf) + if err != nil { + return err + } + w := sliceHeaderWrapper{ + Parameter: slice, + SliceTypeHeaderText: buf.String(), + } + + sliceTemplate, err := resolveHeaderTemplate(slice) + if err != nil { + return err + } + + err = sliceTemplate.Execute(out, w) + if err != nil { + return fmt.Errorf("could not execute template for 
generating slice header: %w", err) + } + return nil +} + +type sliceHeaderWrapper struct { + Parameter *ditypes.Parameter + SliceTypeHeaderText string +} diff --git a/pkg/dynamicinstrumentation/codegen/compile.go b/pkg/dynamicinstrumentation/codegen/compile.go new file mode 100644 index 0000000000000..abb8523280443 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/compile.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package codegen + +//go:generate $GOPATH/bin/include_headers pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/c +//go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/bytecode/runtime/dynamicinstrumentation.go runtime diff --git a/pkg/dynamicinstrumentation/codegen/output_offsets.go b/pkg/dynamicinstrumentation/codegen/output_offsets.go new file mode 100644 index 0000000000000..56250b25897c2 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/output_offsets.go @@ -0,0 +1,138 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package codegen + +import ( + "math/rand" + "reflect" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +type paramDepthCounter struct { + depth int + param *ditypes.Parameter +} + +func applyCaptureDepth(params []ditypes.Parameter, maxDepth int) []ditypes.Parameter { + log.Tracef("Applying capture depth: %d", maxDepth) + queue := []paramDepthCounter{} + + for i := range params { + queue = append(queue, paramDepthCounter{ + depth: 0, + param: &params[i], + }) + } + + for len(queue) != 0 { + front := queue[0] + queue = queue[1:] + + if front.depth == maxDepth { + // max capture depth reached, remove parameters below this level. + front.param.ParameterPieces = []ditypes.Parameter{} + if front.param.Kind == uint(reflect.Struct) { + // struct size reflects the number of fields, + // setting to 0 tells the user space parsing not to + // expect anything else. + front.param.TotalSize = 0 + } + } else { + for i := range front.param.ParameterPieces { + queue = append(queue, paramDepthCounter{ + depth: front.depth + 1, + param: &front.param.ParameterPieces[i], + }) + } + } + } + return params +} + +func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter { + flattenedParams := []ditypes.Parameter{} + for i := range params { + kind := reflect.Kind(params[i].Kind) + if kind == reflect.Slice { + // Slices don't get flattened as we need the underlying type. + // We populate the slice's template using that type. + flattenedParams = append(flattenedParams, params[i]) + } else if hasHeader(kind) { + paramHeader := params[i] + paramHeader.ParameterPieces = nil + flattenedParams = append(flattenedParams, paramHeader) + flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...) + } else if len(params[i].ParameterPieces) > 0 { + flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...)
+ } else { + flattenedParams = append(flattenedParams, params[i]) + } + } + + for i := range flattenedParams { + flattenedParams[i].ID = randomID() + } + + return flattenedParams +} + +func applyFieldCountLimit(params []ditypes.Parameter) { + queue := []*ditypes.Parameter{} + for i := range params { + queue = append(queue, &params[len(params)-1-i]) + } + var ( + current *ditypes.Parameter + max int + ) + for len(queue) != 0 { + current = queue[0] + queue = queue[1:] + + max = len(current.ParameterPieces) + if len(current.ParameterPieces) > ditypes.MaxFieldCount { + max = ditypes.MaxFieldCount + for j := max; j < len(current.ParameterPieces); j++ { + excludeForFieldCount(&current.ParameterPieces[j]) + } + } + for n := 0; n < max; n++ { + queue = append(queue, &current.ParameterPieces[n]) + } + } +} + +func excludeForFieldCount(root *ditypes.Parameter) { + // Exclude all in this tree + if root == nil { + return + } + root.NotCaptureReason = ditypes.FieldLimitReached + root.Kind = ditypes.KindCutFieldLimit + for i := range root.ParameterPieces { + excludeForFieldCount(&root.ParameterPieces[i]) + } +} + +func hasHeader(kind reflect.Kind) bool { + return kind == reflect.Struct || + kind == reflect.Array || + kind == reflect.Pointer +} + +func randomID() string { + length := 6 + randomString := make([]byte, length) + for i := 0; i < length; i++ { + randomString[i] = byte(65 + rand.Intn(25)) + } + return string(randomString) +} diff --git a/pkg/dynamicinstrumentation/codegen/templates.go b/pkg/dynamicinstrumentation/codegen/templates.go new file mode 100644 index 0000000000000..64f5dab18ec3a --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/templates.go @@ -0,0 +1,210 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
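The depth capping done by applyCaptureDepth above is a plain breadth-first traversal that prunes everything below the configured capture depth. A small, self-contained sketch of that behavior on a toy tree; `node` is an illustrative stand-in for ditypes.Parameter.

package main

import "fmt"

type node struct {
	Name     string
	Children []node
}

func capDepth(roots []node, maxDepth int) []node {
	type item struct {
		depth int
		n     *node
	}
	queue := []item{}
	for i := range roots {
		queue = append(queue, item{0, &roots[i]})
	}
	for len(queue) > 0 {
		front := queue[0]
		queue = queue[1:]
		if front.depth == maxDepth {
			front.n.Children = nil // prune everything below the capture depth
			continue
		}
		for i := range front.n.Children {
			queue = append(queue, item{front.depth + 1, &front.n.Children[i]})
		}
	}
	return roots
}

func main() {
	tree := []node{{Name: "a", Children: []node{{Name: "b", Children: []node{{Name: "c"}}}}}}
	fmt.Printf("%+v\n", capDepth(tree, 1)) // "c" is pruned, "b" survives with no children
}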
+ +//go:build linux_bpf + +package codegen + +var forcedVerifierErrorTemplate = ` +int illegalDereference = *(*(*ctx->regs[0])); +` + +var headerTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the kind and size to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +param_size = {{.TotalSize}}; +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; +` + +// The lengths of slices aren't known until parsing, so they require +// special headers to read in the length dynamically +var sliceRegisterHeaderTemplateText = ` +// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} +// Write the slice kind to output buffer +param_type = {{.Parameter.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read slice length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Parameter.Location.Register}}+1]); +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; + +__u16 indexSlice{{.Parameter.ID}}; +slice_length = param_size; +if (slice_length > MAX_SLICE_LENGTH) { + slice_length = MAX_SLICE_LENGTH; +} + +for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) { + if (indexSlice{{.Parameter.ID}} >= slice_length) { + break; + } + {{.SliceTypeHeaderText}} +} +` + +// The lengths of slices aren't known until parsing, so they require +// special headers to read in the length dynamically +var sliceStackHeaderTemplateText = ` +// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} +// Write the slice kind to output buffer +param_type = {{.Parameter.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read slice length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[29]+{{.Parameter.Location.StackOffset}}+8]); +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; + +__u16 indexSlice{{.Parameter.ID}}; +slice_length = param_size; +if (slice_length > MAX_SLICE_LENGTH) { + slice_length = MAX_SLICE_LENGTH; +} + +for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) { + if (indexSlice{{.Parameter.ID}} >= slice_length) { + break; + } + {{.SliceTypeHeaderText}} +} +` + +// The lengths of strings aren't known until parsing, so they require +// special headers to read in the length dynamically +var stringRegisterHeaderTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the string kind to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); + +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]); + +// Limit string length +__u16 string_size_{{.ID}} = param_size; +if (string_size_{{.ID}} > MAX_STRING_SIZE) { + string_size_{{.ID}} = MAX_STRING_SIZE; +} +bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}}); +outputOffset += 3; +` + +// The lengths of strings aren't known until parsing, so they require +// special headers to read in the length dynamically +var stringStackHeaderTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the string kind to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); +// Limit string length +__u16 string_size_{{.ID}} = param_size; +if (string_size_{{.ID}} > MAX_STRING_SIZE) { + string_size_{{.ID}} = MAX_STRING_SIZE; +} +bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}}); +outputOffset += 3; +` + +var sliceRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read contents of slice +bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)ctx->regs[{{.Location.Register}}]); +outputOffset += MAX_SLICE_SIZE; +` + +var sliceStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read contents of slice +bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)(ctx->regs[29]+{{.Location.StackOffset}}); +outputOffset += MAX_SLICE_SIZE;` + +var stringRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]); + +__u16 string_size_read_{{.ID}} = param_size; +if (string_size_read_{{.ID}} > MAX_STRING_SIZE) { + string_size_read_{{.ID}} = MAX_STRING_SIZE; +} + +// Read contents of string +bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)ctx->regs[{{.Location.Register}}]); +outputOffset += string_size_read_{{.ID}}; +` + +var stringStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); +// Limit string length +__u16 string_size_read_{{.ID}} = param_size; +if (string_size_read_{{.ID}} > MAX_STRING_SIZE) { + string_size_read_{{.ID}} = MAX_STRING_SIZE; +} +// Read contents of string +bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(ctx->regs[29]+{{.Location.StackOffset}})); +bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)(ret_addr)); +outputOffset += string_size_read_{{.ID}}; +` + +var pointerRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read the pointer value (address of underlying value) +void *ptrTo{{.ID}}; +bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), &ctx->regs[{{.Location.Register}}]); + +// Write the underlying value to output +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); +outputOffset += {{.TotalSize}}; + +// Write the pointer address to output +ptrTo{{.ID}} += {{.Location.PointerOffset}}; +bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); +` + +var pointerStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read the pointer value (address of underlying value) +void *ptrTo{{.ID}}; +bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); + +// Write the underlying value to output +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); +outputOffset
+= {{.TotalSize}}; + +// Write the pointer address to output +ptrTo{{.ID}} += {{.Location.PointerOffset}}; +bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); +` + +var normalValueRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, &ctx->regs[{{.Location.Register}}]); +outputOffset += {{.TotalSize}}; +` + +var normalValueStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read value for {{.Name}} +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, (char*)((ctx->regs[29])+{{.Location.StackOffset}})); +outputOffset += {{.TotalSize}}; +` + +// Unsupported types just get a single `255` value to signify as a placeholder +// that an unsupported type goes here. Size is where we keep the actual type. +var unsupportedTypeTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// No capture, unsupported type +` + +var cutForFieldLimitTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// No capture, cut for field limit +` diff --git a/pkg/dynamicinstrumentation/di.go b/pkg/dynamicinstrumentation/di.go new file mode 100644 index 0000000000000..409b8a1af5f7a --- /dev/null +++ b/pkg/dynamicinstrumentation/di.go @@ -0,0 +1,159 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package dynamicinstrumentation provides the main entrypoint into running the +// dynamic instrumentation for Go product +package dynamicinstrumentation + +import ( + "encoding/json" + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diconfig" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/uploader" +) + +// GoDI is the central controller representation of the Dynamic Instrumentation +// implementation for Go services +type GoDI struct { + cm diconfig.ConfigManager + + lu uploader.LogUploader + du uploader.DiagnosticUploader + + processEvent ditypes.EventCallback + Close func() + + stats GoDIStats +} + +// GoDIStats is used to track various metrics relevant to the health of the +// Dynamic Instrumentation process +type GoDIStats struct { + PIDEventsCreatedCount map[uint32]uint64 // pid : count + ProbeEventsCreatedCount map[string]uint64 // probeID : count +} + +func newGoDIStats() GoDIStats { + return GoDIStats{ + PIDEventsCreatedCount: make(map[uint32]uint64), + ProbeEventsCreatedCount: make(map[string]uint64), + } +} + +// DIOptions is used to configure the running Dynamic Instrumentation process +type DIOptions struct { + Offline bool + + ProbesFilePath string + SnapshotOutput string + DiagnosticOutput string + + ditypes.EventCallback +} + +// RunDynamicInstrumentation is the main entry point into running the Dynamic +// Instrumentation project for Go. 
+func RunDynamicInstrumentation(opts *DIOptions) (*GoDI, error) { + var goDI *GoDI + + err := ebpf.SetupEventsMap() + if err != nil { + return nil, err + } + + if opts.Offline { + cm, err := diconfig.NewFileConfigManager(opts.ProbesFilePath) + if err != nil { + return nil, fmt.Errorf("couldn't create new file config manager: %w", err) + } + lu, err := uploader.NewOfflineLogSerializer(opts.SnapshotOutput) + if err != nil { + return nil, fmt.Errorf("couldn't create new offline log serializer: %w", err) + } + du, err := uploader.NewOfflineDiagnosticSerializer(diagnostics.Diagnostics, opts.DiagnosticOutput) + if err != nil { + return nil, fmt.Errorf("couldn't create new offline diagnostic serializer: %w", err) + } + goDI = &GoDI{ + cm: cm, + lu: lu, + du: du, + stats: newGoDIStats(), + } + } else { + cm, err := diconfig.NewRCConfigManager() + if err != nil { + return nil, fmt.Errorf("couldn't create new RC config manager: %w", err) + } + goDI = &GoDI{ + cm: cm, + lu: uploader.NewLogUploader(), + du: uploader.NewDiagnosticUploader(), + stats: newGoDIStats(), + } + } + if opts.EventCallback != nil { + goDI.processEvent = opts.EventCallback + } else { + goDI.processEvent = goDI.uploadSnapshot + } + + closeRingbuffer, err := goDI.startRingbufferConsumer() + if err != nil { + return nil, fmt.Errorf("couldn't set up new ringbuffer consumer: %w", err) + } + + goDI.Close = func() { + goDI.cm.Stop() + closeRingbuffer() + } + + return goDI, nil +} + +func (goDI *GoDI) printSnapshot(event *ditypes.DIEvent) { + if event == nil { + return + } + procInfo := goDI.cm.GetProcInfos()[event.PID] + diLog := uploader.NewDILog(procInfo, event) + + var bs []byte + var err error + + if diLog != nil { + bs, err = json.MarshalIndent(diLog, "", " ") + } else { + bs, err = json.MarshalIndent(event, "", " ") + } + + if err != nil { + log.Info(err) + } + log.Debug(string(bs)) +} + +func (goDI *GoDI) uploadSnapshot(event *ditypes.DIEvent) { + goDI.printSnapshot(event) + procInfo := goDI.cm.GetProcInfos()[event.PID] + diLog := uploader.NewDILog(procInfo, event) + if diLog != nil { + goDI.lu.Enqueue(diLog) + } +} + +// GetStats returns the maps of various statitics for +// runtime health of dynamic instrumentation +func (goDI *GoDI) GetStats() GoDIStats { + return goDI.stats +} diff --git a/pkg/dynamicinstrumentation/diagnostics/diagnostics.go b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go new file mode 100644 index 0000000000000..c1e351297210d --- /dev/null +++ b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
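A hedged usage sketch of the entry point defined in di.go above, for an offline run with a custom event callback. It requires the linux_bpf build tag; the file paths are illustrative, and the callback is written assuming ditypes.EventCallback has the signature func(*ditypes.DIEvent).

package main

import (
	"log"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation"
	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

func main() {
	godi, err := dynamicinstrumentation.RunDynamicInstrumentation(&dynamicinstrumentation.DIOptions{
		Offline:          true,
		ProbesFilePath:   "/tmp/probes.json",      // illustrative path
		SnapshotOutput:   "/tmp/snapshots.json",   // illustrative path
		DiagnosticOutput: "/tmp/diagnostics.json", // illustrative path
		// Override the default snapshot upload with a custom callback.
		EventCallback: func(e *ditypes.DIEvent) {
			log.Printf("event from pid %d", e.PID)
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer godi.Close()
	// ... wait for a shutdown signal while probes run ...
}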
+ +//go:build linux_bpf + +// Package diagnostics provides a facility for dynamic instrumentation to upload diagnostic information +package diagnostics + +import ( + "sync" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +func newDIDiagnostic(service, runtimeID, probeID string, status ditypes.Status) *ditypes.DiagnosticUpload { + return &ditypes.DiagnosticUpload{ + Service: service, + DDSource: "dd_debugger", + Debugger: struct { + ditypes.Diagnostic `json:"diagnostics"` + }{ + Diagnostic: ditypes.Diagnostic{ + RuntimeID: runtimeID, + ProbeID: probeID, + Status: status, + }, + }, + } +} + +type probeInstanceID struct { + service string + runtimeID string + probeID string +} + +// DiagnosticManager is used to keep track and upload diagnostic information +type DiagnosticManager struct { + state map[probeInstanceID]*ditypes.DiagnosticUpload + Updates chan *ditypes.DiagnosticUpload + + mu sync.Mutex +} + +// NewDiagnosticManager creates a new DiagnosticManager +func NewDiagnosticManager() *DiagnosticManager { + return &DiagnosticManager{ + state: make(map[probeInstanceID]*ditypes.DiagnosticUpload), + Updates: make(chan *ditypes.DiagnosticUpload), + } +} + +// SetStatus associates the status with the specified service/probe +func (m *DiagnosticManager) SetStatus(service, runtimeID, probeID string, status ditypes.Status) { + id := probeInstanceID{service, probeID, runtimeID} + d := newDIDiagnostic(service, runtimeID, probeID, status) + m.update(id, d) +} + +// SetError associates the error with the specified service/probe +func (m *DiagnosticManager) SetError(service, runtimeID, probeID, errorType, errorMessage string) { + id := probeInstanceID{service, probeID, runtimeID} + d := newDIDiagnostic(service, runtimeID, probeID, ditypes.StatusError) + d.SetError(errorType, errorMessage) + m.update(id, d) +} + +func (m *DiagnosticManager) update(id probeInstanceID, d *ditypes.DiagnosticUpload) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.state[id] != d { + m.state[id] = d + // TODO: if there is no consumer reading updates, this blocks the calling goroutine + m.Updates <- d + } +} + +// Diagnostics is a global instance of a diagnostic manager +var Diagnostics = NewDiagnosticManager() diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go new file mode 100644 index 0000000000000..4722aa6505202 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go @@ -0,0 +1,268 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
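As the TODO in update() above notes, publishing to Updates blocks unless something is draining the channel. A minimal sketch of such a consumer, using the packages introduced in this patch; service, runtime, and probe IDs are illustrative values.

package main

import (
	"fmt"
	"time"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

func main() {
	// Drain diagnostic updates so SetStatus/SetError never block.
	go func() {
		for d := range diagnostics.Diagnostics.Updates {
			fmt.Printf("probe %s -> %v\n", d.Debugger.ProbeID, d.Debugger.Status)
		}
	}()

	// Illustrative values only.
	diagnostics.Diagnostics.SetStatus("my-service", "runtime-id", "probe-id", ditypes.StatusReceived)
	diagnostics.Diagnostics.SetError("my-service", "runtime-id", "probe-id", "ATTACH_ERROR", "could not attach")

	time.Sleep(100 * time.Millisecond) // give the consumer time to print (sketch only)
}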
+ +//go:build linux_bpf + +package diconfig + +import ( + "debug/elf" + "fmt" + "reflect" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" +) + +// inspectGoBinaries goes through each service and populates information about the binary +// and the relevant parameters, and their types +// configEvent maps service names to info about the service and their configurations +func inspectGoBinaries(configEvent ditypes.DIProcs) error { + var err error + for i := range configEvent { + err = AnalyzeBinary(configEvent[i]) + if err != nil { + return fmt.Errorf("inspection of PID %d (path=%s) failed: %w", configEvent[i].PID, configEvent[i].BinaryPath, err) + } + } + return nil +} + +// AnalyzeBinary reads the binary associated with the specified process and parses +// the DWARF information. It populates relevant fields in the process representation +func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error { + functions := []string{} + targetFunctions := map[string]bool{} + for _, probe := range procInfo.GetProbes() { + functions = append(functions, probe.FuncName) + targetFunctions[probe.FuncName] = true + } + + dwarfData, err := loadDWARF(procInfo.BinaryPath) + if err != nil { + return fmt.Errorf("could not retrieve debug information from binary: %w", err) + } + + typeMap, err := getTypeMap(dwarfData, targetFunctions) + if err != nil { + return fmt.Errorf("could not retrieve type information from binary %w", err) + } + + procInfo.TypeMap = typeMap + + elfFile, err := elf.Open(procInfo.BinaryPath) + if err != nil { + return fmt.Errorf("could not open elf file %w", err) + } + + procInfo.DwarfData = dwarfData + + fieldIDs := make([]bininspect.FieldIdentifier, 0) + for _, funcParams := range typeMap.Functions { + for _, param := range funcParams { + fieldIDs = append(fieldIDs, + collectFieldIDs(param)...) + } + } + + r, err := bininspect.InspectWithDWARF(elfFile, functions, fieldIDs) + if err != nil { + return fmt.Errorf("could not determine locations of variables from debug information %w", err) + } + + // Use the result from InspectWithDWARF to populate the locations of parameters + for functionName, functionMetadata := range r.Functions { + putLocationsInParams(functionMetadata.Parameters, r.StructOffsets, procInfo.TypeMap.Functions, functionName) + correctStructSizes(procInfo.TypeMap.Functions[functionName]) + } + + return nil +} + +// collectFieldIDs returns all struct fields if there are any amongst types of parameters +// including if there's structs that are nested deep within complex types +func collectFieldIDs(param ditypes.Parameter) []bininspect.FieldIdentifier { + fieldIDs := []bininspect.FieldIdentifier{} + stack := append([]ditypes.Parameter{param}, param.ParameterPieces...) + + for len(stack) != 0 { + + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if !kindIsSupported(reflect.Kind(current.Kind)) { + continue + } + if len(current.ParameterPieces) != 0 { + stack = append(stack, current.ParameterPieces...) + } + + if current.Kind == uint(reflect.Struct) || current.Kind == uint(reflect.Slice) { + for _, structField := range current.ParameterPieces { + if structField.Name == "" || current.Type == "" { + // these can be blank in anonymous types or embedded fields + // of builtin types. bininspect has no ability to find offsets + // in these cases and we're best off skipping them. 
+ continue + } + fieldIDs = append(fieldIDs, bininspect.FieldIdentifier{ + StructName: current.Type, + FieldName: structField.Name, + }) + if len(fieldIDs) >= ditypes.MaxFieldCount { + log.Info("field limit applied, not collecting further fields", len(fieldIDs), ditypes.MaxFieldCount) + return fieldIDs + } + } + } + } + return fieldIDs +} + +func putLocationsInParams( + paramMetadatas []bininspect.ParameterMetadata, + fieldLocations map[bininspect.FieldIdentifier]uint64, + funcMap map[string][]ditypes.Parameter, + funcName string) { + + params := funcMap[funcName] + locations := []ditypes.Location{} + + // Collect locations in order + for _, param := range paramMetadatas { + for _, piece := range param.Pieces { + locations = append(locations, ditypes.Location{ + InReg: piece.InReg, + StackOffset: piece.StackOffset, + Register: piece.Register, + }) + } + } + + assignLocationsInOrder(params, locations) + correctTypeSpecificLocations(params, fieldLocations) + + funcMap[funcName] = params +} + +func assignLocationsInOrder(params []ditypes.Parameter, locations []ditypes.Location) { + stack := []*ditypes.Parameter{} + locationCounter := 0 + + // Start by pushing addresses of all parameters to stack + for i := range params { + stack = append(stack, &params[len(params)-1-i]) + } + + for { + if len(stack) == 0 || locationCounter == len(locations) { + return + } + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if len(current.ParameterPieces) != 0 && + current.Kind != uint(reflect.Array) && + current.Kind != uint(reflect.Pointer) && + current.Kind != uint(reflect.Slice) { + + for i := range current.ParameterPieces { + stack = append(stack, &current.ParameterPieces[len(current.ParameterPieces)-1-i]) + } + } else { + // Location fields are directly assigned instead of setting the whole + // location field to preserve other fields + locationToAssign := locations[locationCounter] + current.Location.InReg = locationToAssign.InReg + current.Location.Register = locationToAssign.Register + current.Location.StackOffset = locationToAssign.StackOffset + + if reflect.Kind(current.Kind) == reflect.String { + // Strings actually have two locations (pointer, length) + // but are shortened to a single one for parsing. The missing + // location is taken into account in bpf code, but we need + // to make sure it's not assigned to something else here. + locationCounter++ + } else if reflect.Kind(current.Kind) == reflect.Slice { + // slices actually have three locations (array, length, capacity) + // but are shortened to a single one for parsing. The missing + // locations are taken into account in bpf code, but we need + // to make sure it's not assigned to something else here.
+ locationCounter += 2 + } + locationCounter++ + } + } +} + +func correctTypeSpecificLocations(params []ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + for i := range params { + if params[i].Kind == uint(reflect.Array) { + correctArrayLocations(&params[i], fieldLocations) + } else if params[i].Kind == uint(reflect.Pointer) { + correctPointerLocations(&params[i], fieldLocations) + } else if params[i].Kind == uint(reflect.Struct) { + correctStructLocations(&params[i], fieldLocations) + } + } +} + +// correctStructLocations sets pointer and stack offsets for struct fields from +// bininspect results +func correctStructLocations(structParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + for i := range structParam.ParameterPieces { + fieldID := bininspect.FieldIdentifier{ + StructName: structParam.Type, + FieldName: structParam.ParameterPieces[i].Name, + } + offset, ok := fieldLocations[fieldID] + if !ok { + log.Infof("no field location available for %s.%s\n", fieldID.StructName, fieldID.FieldName) + continue + } + + fieldLocationsHaveAlreadyBeenDirectlyAssigned := isLocationSet(structParam.ParameterPieces[i].Location) + if fieldLocationsHaveAlreadyBeenDirectlyAssigned { + // The location would be set if it was directly assigned to (i.e. has its own register instead of needing + // to dereference a pointer or get the element from a slice) + structParam.ParameterPieces[i].Location = structParam.Location + structParam.ParameterPieces[i].Location.StackOffset = int64(offset) + structParam.Location.StackOffset + } + + structParam.ParameterPieces[i].Location.PointerOffset = offset + structParam.ParameterPieces[i].Location.StackOffset = structParam.ParameterPieces[0].Location.StackOffset + int64(offset) + + correctTypeSpecificLocations([]ditypes.Parameter{structParam.ParameterPieces[i]}, fieldLocations) + } +} + +func isLocationSet(l ditypes.Location) bool { + return reflect.DeepEqual(l, ditypes.Location{}) +} + +// correctPointerLocations takes a parameter's location and copies it to the underlying +// type that's pointed to.
It sets `NeedsDereference` to true +// and then calls the top level function on the pointed-to element to ensure it also gets a corrected location +func correctPointerLocations(pointerParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + // Pointers should have exactly one entry in ParameterPieces that corresponds to the underlying type + if len(pointerParam.ParameterPieces) != 1 { + return + } + pointerParam.ParameterPieces[0].Location = pointerParam.Location + pointerParam.ParameterPieces[0].Location.NeedsDereference = true + correctTypeSpecificLocations([]ditypes.Parameter{pointerParam.ParameterPieces[0]}, fieldLocations) +} + +// correctArrayLocations takes a parameter's location and distributes it to each element +// by using `stack offset + (size*index)`, then calls the top level function on each element +// of the array to ensure all elements have corrected locations +func correctArrayLocations(arrayParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + initialOffset := arrayParam.Location.StackOffset + for i := range arrayParam.ParameterPieces { + arrayParam.ParameterPieces[i].Location.StackOffset = initialOffset + (arrayParam.ParameterPieces[i].TotalSize * int64(i)) + correctTypeSpecificLocations([]ditypes.Parameter{arrayParam.ParameterPieces[i]}, fieldLocations) + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/config_manager.go b/pkg/dynamicinstrumentation/diconfig/config_manager.go new file mode 100644 index 0000000000000..a49287e0a7566 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/config_manager.go @@ -0,0 +1,295 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
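A tiny illustration of the array layout arithmetic used by correctArrayLocations above: element i of a stack-resident array lives at the base stack offset plus elementSize*i. The numbers below are illustrative.

package main

import "fmt"

// elementOffsets returns the stack offset of each element of an array of n
// elements of the given size, starting at baseStackOffset.
func elementOffsets(baseStackOffset, elementSize int64, n int) []int64 {
	offsets := make([]int64, n)
	for i := range offsets {
		offsets[i] = baseStackOffset + elementSize*int64(i)
	}
	return offsets
}

func main() {
	// A [4]int64 array starting at stack offset 16.
	fmt.Println(elementOffsets(16, 8, 4)) // [16 24 32 40]
}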
+ +//go:build linux_bpf + +// Package diconfig provides utlity that allows dynamic instrumentation to receive and +// manage probe configurations from users +package diconfig + +import ( + "encoding/json" + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/codegen" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf/ringbuf" + "github.com/google/uuid" +) + +type rcConfig struct { + ID string + Version int + ProbeType string `json:"type"` + Language string + Where struct { + TypeName string `json:"typeName"` + MethodName string `json:"methodName"` + SourceFile string + Lines []string + } + Tags []string + Template string + CaptureSnapshot bool + EvaluatedAt string + Capture struct { + MaxReferenceDepth int `json:"maxReferenceDepth"` + MaxFieldCount int `json:"maxFieldCount"` + } +} + +type configUpdateCallback func(*ditypes.ProcessInfo, *ditypes.Probe) + +// ConfigManager is a facility to track probe configurations for +// instrumenting tracked processes +type ConfigManager interface { + GetProcInfos() ditypes.DIProcs + Stop() +} + +// RCConfigManager is the configuration manager which utilizes remote-config +type RCConfigManager struct { + procTracker *proctracker.ProcessTracker + + diProcs ditypes.DIProcs + callback configUpdateCallback +} + +// NewRCConfigManager creates a new configuration manager which utilizes remote-config +func NewRCConfigManager() (*RCConfigManager, error) { + log.Info("Creating new RC config manager") + cm := &RCConfigManager{ + callback: applyConfigUpdate, + } + + cm.procTracker = proctracker.NewProcessTracker(cm.updateProcesses) + err := cm.procTracker.Start() + if err != nil { + return nil, fmt.Errorf("could not start process tracker: %w", err) + } + cm.diProcs = ditypes.NewDIProcs() + return cm, nil +} + +// GetProcInfos returns the state of the RCConfigManager +func (cm *RCConfigManager) GetProcInfos() ditypes.DIProcs { + return cm.diProcs +} + +// Stop closes the config and proc trackers used by the RCConfigManager +func (cm *RCConfigManager) Stop() { + cm.procTracker.Stop() + for _, procInfo := range cm.GetProcInfos() { + procInfo.CloseAllUprobeLinks() + } +} + +// updateProcesses is the callback interface that ConfigManager uses to consume the map of `ProcessInfo`s +// It is called whenever there's an update to the state of known processes of services on the machine. +// +// It compares the previously known state of services on the machine and creates a hook on the remote-config +// callback for configurations on new ones, and deletes the hook on old ones. 
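The rcConfig struct above mirrors the probe configuration JSON delivered through remote config. A hedged sketch of that shape follows; the JSON keys and values are illustrative, and the local struct mirrors rcConfig rather than importing it.

package main

import (
	"encoding/json"
	"fmt"
)

type probeConfig struct {
	ID    string `json:"id"`
	Where struct {
		TypeName   string `json:"typeName"`
		MethodName string `json:"methodName"`
	} `json:"where"`
	CaptureSnapshot bool `json:"captureSnapshot"`
	Capture         struct {
		MaxReferenceDepth int `json:"maxReferenceDepth"`
		MaxFieldCount     int `json:"maxFieldCount"`
	} `json:"capture"`
}

func main() {
	// Illustrative configuration body.
	raw := `{
		"id": "11111111-2222-3333-4444-555555555555",
		"where": {"typeName": "main", "methodName": "handleRequest"},
		"captureSnapshot": false,
		"capture": {"maxReferenceDepth": 4, "maxFieldCount": 10}
	}`
	var cfg probeConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}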
+func (cm *RCConfigManager) updateProcesses(runningProcs ditypes.DIProcs) { + // Remove processes that are no longer running from state and close their uprobe links + for pid, procInfo := range cm.diProcs { + _, ok := runningProcs[pid] + if !ok { + procInfo.CloseAllUprobeLinks() + delete(cm.diProcs, pid) + } + } + + for pid, runningProcInfo := range runningProcs { + _, ok := cm.diProcs[pid] + if !ok { + cm.diProcs[pid] = runningProcInfo + err := cm.installConfigProbe(runningProcInfo) + if err != nil { + log.Infof("could not install config probe for service %s (pid %d): %s", runningProcInfo.ServiceName, runningProcInfo.PID, err) + } + } + } +} + +func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) error { + var err error + configProbe := newConfigProbe() + + svcConfigProbe := *configProbe + svcConfigProbe.ServiceName = procInfo.ServiceName + procInfo.ProbesByID[configProbe.ID] = &svcConfigProbe + + err = AnalyzeBinary(procInfo) + if err != nil { + return fmt.Errorf("could not analyze binary for config probe: %w", err) + } + + err = codegen.GenerateBPFParamsCode(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not generate bpf code for config probe: %w", err) + } + + err = ebpf.CompileBPFProgram(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not compile bpf code for config probe: %w", err) + } + + err = ebpf.AttachBPFUprobe(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not attach bpf code for config probe: %w", err) + } + + m, err := procInfo.SetupConfigUprobe() + if err != nil { + return fmt.Errorf("could not setup config probe for service %s: %w", procInfo.ServiceName, err) + } + + r, err := ringbuf.NewReader(m) + if err != nil { + return fmt.Errorf("could not read from config probe %s", procInfo.ServiceName) + } + + go cm.readConfigs(r, procInfo) + + return nil +} + +func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.ProcessInfo) { + log.Tracef("Waiting for configs for service: %s", procInfo.ServiceName) + for { + record, err := r.Read() + if err != nil { + log.Errorf("error reading raw configuration from bpf: %v", err) + continue + } + + configEventParams, err := eventparser.ParseParams(record.RawSample) + if err != nil { + log.Errorf("error parsing configuration for PID %d: %v", procInfo.PID, err) + continue + } + if len(configEventParams) != 3 { + log.Errorf("error parsing configuration for PID %d: not enough arguments", procInfo.PID) + continue + } + + runtimeID, err := uuid.ParseBytes([]byte(configEventParams[0].ValueStr)) + if err != nil { + log.Errorf("Runtime ID \"%s\" is not a UUID: %v)", runtimeID, err) + continue + } + + configPath, err := ditypes.ParseConfigPath(string(configEventParams[1].ValueStr)) + if err != nil { + log.Errorf("couldn't parse config path: %v", err) + continue + } + + // An empty config means that this probe has been removed for this process + if configEventParams[2].ValueStr == "" { + cm.diProcs.DeleteProbe(procInfo.PID, configPath.ProbeUUID.String()) + continue + } + + conf := rcConfig{} + err = json.Unmarshal([]byte(configEventParams[2].ValueStr), &conf) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, configPath.ProbeUUID.String(), "ATTACH_ERROR", err.Error()) + log.Errorf("could not unmarshal configuration, cannot apply: %v (Probe-ID: %s)\n", err, configPath.ProbeUUID) + continue + } + + if conf.Capture.MaxReferenceDepth == 0 { + conf.Capture.MaxReferenceDepth = int(ditypes.MaxReferenceDepth) + } 
+ if conf.Capture.MaxFieldCount == 0 { + conf.Capture.MaxFieldCount = int(ditypes.MaxFieldCount) + } + opts := &ditypes.InstrumentationOptions{ + CaptureParameters: ditypes.CaptureParameters, + ArgumentsMaxSize: ditypes.ArgumentsMaxSize, + StringMaxSize: ditypes.StringMaxSize, + MaxReferenceDepth: conf.Capture.MaxReferenceDepth, + MaxFieldCount: conf.Capture.MaxFieldCount, + } + + probe, probeExists := procInfo.ProbesByID[configPath.ProbeUUID.String()] + if !probeExists { + cm.diProcs.SetProbe(procInfo.PID, procInfo.ServiceName, conf.Where.TypeName, conf.Where.MethodName, configPath.ProbeUUID, runtimeID, opts) + diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, runtimeID.String(), configPath.ProbeUUID.String(), ditypes.StatusReceived) + probe = procInfo.ProbesByID[configPath.ProbeUUID.String()] + } + + // Check hash to see if the configuration changed + if configPath.Hash != probe.InstrumentationInfo.ConfigurationHash { + probe.InstrumentationInfo.ConfigurationHash = configPath.Hash + applyConfigUpdate(procInfo, probe) + } + } +} + +func applyConfigUpdate(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) { + log.Tracef("Applying config update: %v", probe) + err := AnalyzeBinary(procInfo) + if err != nil { + log.Errorf("couldn't inspect binary: %v\n", err) + return + } + +generateCompileAttach: + err = codegen.GenerateBPFParamsCode(procInfo, probe) + if err != nil { + log.Info("Couldn't generate BPF programs", err) + return + } + + err = ebpf.CompileBPFProgram(procInfo, probe) + if err != nil { + log.Info("Couldn't compile BPF object", err) + if !probe.InstrumentationInfo.AttemptedRebuild { + log.Info("Removing parameters and attempting to rebuild BPF object", err) + probe.InstrumentationInfo.AttemptedRebuild = true + probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false + goto generateCompileAttach + } + return + } + + err = ebpf.AttachBPFUprobe(procInfo, probe) + if err != nil { + log.Info("Couldn't load and attach bpf programs", err) + if !probe.InstrumentationInfo.AttemptedRebuild { + log.Info("Removing parameters and attempting to rebuild BPF object", err) + probe.InstrumentationInfo.AttemptedRebuild = true + probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false + goto generateCompileAttach + } + return + } +} + +func newConfigProbe() *ditypes.Probe { + return &ditypes.Probe{ + ID: ditypes.ConfigBPFProbeID, + FuncName: "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer.passProbeConfiguration", + InstrumentationInfo: &ditypes.InstrumentationInfo{ + InstrumentationOptions: &ditypes.InstrumentationOptions{ + ArgumentsMaxSize: 100000, + StringMaxSize: 30000, + MaxFieldCount: int(ditypes.MaxFieldCount), + MaxReferenceDepth: 8, + CaptureParameters: true, + }, + }, + RateLimiter: ratelimiter.NewSingleEventRateLimiter(0), + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go new file mode 100644 index 0000000000000..03bc95335d409 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go @@ -0,0 +1,642 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
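applyConfigUpdate above retries the generate/compile/attach sequence once with parameter capture disabled if the first attempt fails. A simplified, self-contained sketch of that fallback; the compileAndAttach function here is a stand-in, not the real ebpf package API.

package main

import (
	"errors"
	"fmt"
)

type options struct{ CaptureParameters bool }

// compileAndAttach simulates a build that only fails while parameter capture is on.
func compileAndAttach(opts options) error {
	if opts.CaptureParameters {
		return errors.New("verifier rejected program")
	}
	return nil
}

func instrument() error {
	opts := options{CaptureParameters: true}
	attempted := false
	for {
		err := compileAndAttach(opts)
		if err == nil {
			return nil
		}
		if attempted {
			return err
		}
		// Drop parameter capture and rebuild once, mirroring AttemptedRebuild.
		attempted = true
		opts.CaptureParameters = false
	}
}

func main() {
	fmt.Println(instrument()) // <nil> after the fallback retry
}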
+ +//go:build linux_bpf + +package diconfig + +import ( + "cmp" + "debug/dwarf" + "debug/elf" + "fmt" + "io" + "reflect" + "slices" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/go-delve/delve/pkg/dwarf/godwarf" +) + +func getTypeMap(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) { + return loadFunctionDefinitions(dwarfData, targetFunctions) +} + +var dwarfMap = make(map[string]*dwarf.Data) + +type seenTypeCounter struct { + parameter *ditypes.Parameter + count uint8 +} + +var seenTypes = make(map[string]*seenTypeCounter) + +func loadFunctionDefinitions(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) { + entryReader := dwarfData.Reader() + typeReader := dwarfData.Reader() + readingAFunction := false + var funcName string + + var result = ditypes.TypeMap{ + Functions: make(map[string][]ditypes.Parameter), + InlinedFunctions: make(map[uint64][]*dwarf.Entry), + } + + var ( + name string + typeFields *ditypes.Parameter + ) + +entryLoop: + for { + entry, err := entryReader.Next() + if err == io.EOF || entry == nil { + break + } + + if entryIsEmpty(entry) { + readingAFunction = false + continue entryLoop + } + + if entry.Tag == dwarf.TagCompileUnit { + + name, ok := entry.Val(dwarf.AttrName).(string) + if !ok { + continue entryLoop + } + ranges, err := dwarfData.Ranges(entry) + if err != nil { + log.Infof("couldnt retrieve ranges for compile unit %s: %s", name, err) + continue entryLoop + } + + for i := range ranges { + result.DeclaredFiles = append(result.DeclaredFiles, &ditypes.LowPCEntry{ + LowPC: ranges[i][0], + Entry: entry, + }) + } + } + + if entry.Tag == dwarf.TagInlinedSubroutine { + // This is a inlined function + for i := range entry.Field { + // Find it's high program counter (where it exits in the parent routine) + if entry.Field[i].Attr == dwarf.AttrHighpc { + + // The field for HighPC can be a constant or address, which are int64 and uint64 respectively + if entry.Field[i].Class == dwarf.ClassConstant { + result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))] = + append([]*dwarf.Entry{entry}, result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))]...) + } else if entry.Field[i].Class == dwarf.ClassAddress { + result.InlinedFunctions[entry.Field[i].Val.(uint64)] = + append([]*dwarf.Entry{entry}, result.InlinedFunctions[entry.Field[i].Val.(uint64)]...) 
+ } + } + } + continue entryLoop + } + + if entry.Tag == dwarf.TagSubprogram { + + for _, field := range entry.Field { + if field.Attr == dwarf.AttrLowpc { + lowpc := field.Val.(uint64) + result.FunctionsByPC = append(result.FunctionsByPC, &ditypes.LowPCEntry{LowPC: lowpc, Entry: entry}) + } + } + + for _, field := range entry.Field { + if field.Attr == dwarf.AttrName { + funcName = field.Val.(string) + if !targetFunctions[funcName] { + continue entryLoop + } + result.Functions[funcName] = make([]ditypes.Parameter, 0) + readingAFunction = true + continue entryLoop + } + } + } + + if !readingAFunction { + continue + } + + if entry.Tag != dwarf.TagFormalParameter { + readingAFunction = false + continue entryLoop + } + + // This branch should only be reached if we're currently reading ditypes.Parameters of a function + // Meaning: This is a formal ditypes.Parameter entry, and readingAFunction = true + + // Go through fields of the entry collecting type, name, size information + for i := range entry.Field { + + // ditypes.Parameter name + if entry.Field[i].Attr == dwarf.AttrName { + name = entry.Field[i].Val.(string) + } + + // Collect information about the type of this ditypes.Parameter + if entry.Field[i].Attr == dwarf.AttrType { + + typeReader.Seek(entry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + + typeFields, err = expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("error while parsing debug information: %w", err) + } + + } + } + + typeFields.Name = name + + // We've collected information about this ditypes.Parameter, append it to the slice of ditypes.Parameters for this function + result.Functions[funcName] = append(result.Functions[funcName], *typeFields) + seenTypes = make(map[string]*seenTypeCounter) // reset seen types map for next parameter + } + + // Sort program counter slice for lookup when resolving pcs->functions + slices.SortFunc(result.FunctionsByPC, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + slices.SortFunc(result.DeclaredFiles, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + return &result, nil +} + +func loadDWARF(binaryPath string) (*dwarf.Data, error) { + if dwarfData, ok := dwarfMap[binaryPath]; ok { + return dwarfData, nil + } + elfFile, err := elf.Open(binaryPath) + if err != nil { + return nil, fmt.Errorf("couldn't open elf binary: %w", err) + } + + dwarfData, err := elfFile.DWARF() + if err != nil { + return nil, fmt.Errorf("couldn't retrieve debug info from elf: %w", err) + } + dwarfMap[binaryPath] = dwarfData + return dwarfData, nil +} + +func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parameter, error) { + typeReader := dwarfData.Reader() + + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get type entry: %w", err) + } + + if !entryTypeIsSupported(typeEntry) { + return resolveUnsupportedEntry(typeEntry), nil + } + + if typeEntry.Tag == dwarf.TagTypedef { + typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader) + if err != nil { + return nil, err + } + } + + typeName, typeSize, typeKind := getTypeEntryBasicInfo(typeEntry) + typeHeader := ditypes.Parameter{ + Type: typeName, + TotalSize: typeSize, + Kind: typeKind, + } + + v, typeParsedAlready := seenTypes[typeHeader.Type] + if typeParsedAlready { + v.count++ + if v.count >= ditypes.MaxReferenceDepth { + return v.parameter, nil + } + } 
else { + seenTypes[typeHeader.Type] = &seenTypeCounter{ + parameter: &typeHeader, + count: 1, + } + } + + if typeKind == uint(reflect.Slice) { + sliceElements, err := getSliceField(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not collect fields of slice type: %w", err) + } + typeHeader = sliceElements[0] + } else if typeEntry.Tag == dwarf.TagStructType && typeName != "string" { + structFields, err := getStructFields(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not collect fields of struct type of ditypes.Parameter: %w", err) + } + typeHeader.ParameterPieces = structFields + } else if typeEntry.Tag == dwarf.TagArrayType { + arrayElements, err := getIndividualArrayElements(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not get length of array: %w", err) + } + typeHeader.ParameterPieces = arrayElements + } else if typeEntry.Tag == dwarf.TagPointerType { + pointerElements, err := getPointerLayers(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not find pointer type: %w", err) + } + typeHeader.ParameterPieces = pointerElements + } + + return &typeHeader, nil +} + +// getSliceField returns the representation of a slice as a []ditypes.Parameter. The returned +// slice will have only one element. +// +// Slices are represented internally in go as a struct with 3 fields. The pointer to the +// the underlying array, the array length, and the array capacity. +func getSliceField(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + typeReader := dwarfData.Reader() + + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get slice type entry: %w", err) + } + + elementTypeName, elementTypeSize, elementTypeKind := getTypeEntryBasicInfo(typeEntry) + sliceParameter := ditypes.Parameter{ + Type: elementTypeName, + TotalSize: elementTypeSize, + Kind: elementTypeKind, + } + + arrayEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get slice type entry: %w", err) + } + + for i := range arrayEntry.Field { + if arrayEntry.Field[i].Attr == dwarf.AttrType { + typeReader.Seek(arrayEntry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + underlyingType, err := expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return nil, err + } + sliceParameter.ParameterPieces = append(sliceParameter.ParameterPieces, underlyingType.ParameterPieces[0]) + } + } + return []ditypes.Parameter{sliceParameter}, nil +} + +func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + savedArrayEntryOffset := offset + typeReader := dwarfData.Reader() + + // Go to the entry of the array type to get the underlying type information + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get array type entry: %w", err) + } + + var ( + elementFields *ditypes.Parameter + elementTypeName string + elementTypeSize int64 + elementTypeKind uint + ) + underlyingType, err := followType(typeEntry, dwarfData.Reader()) + if err != nil { + return nil, fmt.Errorf("could not get underlying array type's type entry: %w", err) + } + if !entryTypeIsSupported(underlyingType) { + elementFields = resolveUnsupportedEntry(underlyingType) + elementTypeName, elementTypeSize, elementTypeKind = getTypeEntryBasicInfo(underlyingType) + } else { + 
arrayElementTypeEntry, err := resolveTypedefToRealType(underlyingType, typeReader) + if err != nil { + return nil, err + } + + elementFields, err = expandTypeData(arrayElementTypeEntry.Offset, dwarfData) + if err != nil { + return nil, err + } + + elementTypeName, elementTypeSize, elementTypeKind = getTypeEntryBasicInfo(arrayElementTypeEntry) + } + + // Return back to entry of array so we can go to the subrange entry after the type, which gives + // us the length of the array + typeReader.Seek(savedArrayEntryOffset) + _, err = typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not find array entry: %w", err) + } + subrangeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get length of array: %w", err) + } + + var arrayLength int64 + for h := range subrangeEntry.Field { + if subrangeEntry.Field[h].Attr == dwarf.AttrCount { + arrayLength = subrangeEntry.Field[h].Val.(int64) + } + } + + arrayElements := []ditypes.Parameter{} + for h := 0; h < int(arrayLength); h++ { + newParam := ditypes.Parameter{} + copyTree(&newParam.ParameterPieces, &elementFields.ParameterPieces) + newParam.Name = fmt.Sprintf("[%d]%s[%d]", arrayLength, elementTypeName, h) + newParam.Type = elementTypeName + newParam.Kind = elementTypeKind + newParam.TotalSize = elementTypeSize + arrayElements = append(arrayElements, newParam) + } + + return arrayElements, nil +} + +func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + inOrderReader := dwarfData.Reader() + typeReader := dwarfData.Reader() + + structFields := []ditypes.Parameter{} + fieldEntry := &dwarf.Entry{} + + // Start at the entry of the definition of the struct + inOrderReader.Seek(offset) + _, err := inOrderReader.Next() + if err != nil { + return structFields, err + } + + // From the struct entry in DWARF, traverse through subsequent DWARF entries + // which are fields of the struct + for { + fieldEntry, err = inOrderReader.Next() + if err != nil { + return []ditypes.Parameter{}, err + } + + if entryIsEmpty(fieldEntry) || fieldEntry.Tag != dwarf.TagMember { + break + } + + newStructField := ditypes.Parameter{} + + for i := range fieldEntry.Field { + + // Struct Field Name + if fieldEntry.Field[i].Attr == dwarf.AttrName { + newStructField.Name = fieldEntry.Field[i].Val.(string) + } + + // Struct Field Type + if fieldEntry.Field[i].Attr == dwarf.AttrType { + typeReader.Seek(fieldEntry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return []ditypes.Parameter{}, err + } + + if !entryTypeIsSupported(typeEntry) { + unsupportedType := resolveUnsupportedEntry(typeEntry) + structFields = append(structFields, *unsupportedType) + continue + } + + if typeEntry.Tag == dwarf.TagTypedef { + typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader) + if err != nil { + return []ditypes.Parameter{}, err + } + } + + newStructField.Type, newStructField.TotalSize, newStructField.Kind = getTypeEntryBasicInfo(typeEntry) + if typeEntry.Tag != dwarf.TagBaseType { + field, err := expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return []ditypes.Parameter{}, err + } + field.Name = newStructField.Name + structFields = append(structFields, *field) + } else { + structFields = append(structFields, newStructField) + } + } + } + } + return structFields, nil +} + +func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + typeReader := dwarfData.Reader() + typeReader.Seek(offset) + pointerEntry, err := 
typeReader.Next()
+	if err != nil {
+		return nil, err
+	}
+	var underlyingType *ditypes.Parameter
+	for i := range pointerEntry.Field {
+
+		if pointerEntry.Field[i].Attr == dwarf.AttrType {
+			typeReader.Seek(pointerEntry.Field[i].Val.(dwarf.Offset))
+			typeEntry, err := typeReader.Next()
+			if err != nil {
+				return nil, err
+			}
+
+			underlyingType, err = expandTypeData(typeEntry.Offset, dwarfData)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	if underlyingType == nil {
+		return []ditypes.Parameter{}, nil
+	}
+	return []ditypes.Parameter{*underlyingType}, nil
+}
+
+// Can use `Children` field, but there's also always a NULL/empty entry at the end of entry trees.
+func entryIsEmpty(e *dwarf.Entry) bool {
+	return !e.Children &&
+		len(e.Field) == 0 &&
+		e.Offset == 0 &&
+		e.Tag == dwarf.Tag(0)
+}
+
+func getTypeEntryBasicInfo(typeEntry *dwarf.Entry) (typeName string, typeSize int64, typeKind uint) {
+	if typeEntry.Tag == dwarf.TagPointerType {
+		typeSize = 8 // On 64 bit, all pointers are 8 bytes
+	}
+	for i := range typeEntry.Field {
+		if typeEntry.Field[i].Attr == dwarf.AttrName {
+			typeName = typeEntry.Field[i].Val.(string)
+		}
+		if typeEntry.Field[i].Attr == dwarf.AttrByteSize {
+			typeSize = typeEntry.Field[i].Val.(int64)
+		}
+		if typeEntry.Field[i].Attr == godwarf.AttrGoKind {
+			typeKind = uint(typeEntry.Field[i].Val.(int64))
+			if typeKind == 0 {
+				// Temporary fix for bug: https://github.com/golang/go/issues/64231
+				switch typeEntry.Tag {
+				case dwarf.TagStructType:
+					typeKind = uint(reflect.Struct)
+				case dwarf.TagArrayType:
+					typeKind = uint(reflect.Array)
+				case dwarf.TagPointerType:
+					typeKind = uint(reflect.Pointer)
+				default:
+					log.Info("Unexpected AttrGoKind == 0 for", typeEntry.Tag)
+				}
+			}
+		}
+	}
+	return
+}
+
+func followType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) {
+	for i := range outerType.Field {
+		if outerType.Field[i].Attr == dwarf.AttrType {
+			reader.Seek(outerType.Field[i].Val.(dwarf.Offset))
+			nextType, err := reader.Next()
+			if err != nil {
+				return nil, fmt.Errorf("error while retrieving underlying type: %w", err)
+			}
+			return nextType, nil
+		}
+	}
+	return outerType, nil
+}
+
+// resolveTypedefToRealType is used to get the underlying type of fields/variables/parameters when
+// Go packages the type underneath a typedef DWARF entry. The typedef DWARF entry has a 'type' entry
+// which points to the actual type, which is what this function 'resolves'.
+// Typedefs are used for structs, pointers, maps, and likely other types.
+func resolveTypedefToRealType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) { + + if outerType.Tag == dwarf.TagTypedef { + followedType, err := followType(outerType, reader) + if err != nil { + return nil, err + } + + if followedType.Tag == dwarf.TagTypedef { + return resolveTypedefToRealType(followedType, reader) + } + return followedType, nil + } + + return outerType, nil +} + +func correctStructSizes(params []ditypes.Parameter) { + for i := range params { + correctStructSize(¶ms[i]) + } +} + +// correctStructSize sets the size of structs to the number of fields in the struct +func correctStructSize(param *ditypes.Parameter) { + if len(param.ParameterPieces) == 0 { + return + } + if param.Kind == uint(reflect.Struct) || param.Kind == uint(reflect.Array) { + param.TotalSize = int64(len(param.ParameterPieces)) + } + for i := range param.ParameterPieces { + correctStructSize(¶m.ParameterPieces[i]) + } +} + +func copyTree(dst, src *[]ditypes.Parameter) { + if dst == nil || src == nil || len(*src) == 0 { + return + } + *dst = make([]ditypes.Parameter, len(*src)) + copy(*dst, *src) + for i := range *src { + copyTree(&((*dst)[i].ParameterPieces), &((*src)[i].ParameterPieces)) + } +} + +func kindIsSupported(k reflect.Kind) bool { + if k == reflect.Map || + k == reflect.UnsafePointer || + k == reflect.Chan { + return false + } + return true +} + +func typeIsSupported(t string) bool { + return t != "unsafe.Pointer" +} + +func entryTypeIsSupported(e *dwarf.Entry) bool { + for f := range e.Field { + + if e.Field[f].Attr == godwarf.AttrGoKind { + kindOfTypeEntry := reflect.Kind(e.Field[f].Val.(int64)) + if !kindIsSupported(kindOfTypeEntry) { + return false + } + } + + if e.Field[f].Attr == dwarf.AttrName { + if !typeIsSupported(e.Field[f].Val.(string)) { + return false + } + } + } + return true +} + +func resolveUnsupportedEntry(e *dwarf.Entry) *ditypes.Parameter { + var ( + kind uint + name string + ) + for f := range e.Field { + if e.Field[f].Attr == godwarf.AttrGoKind { + kind = uint(e.Field[f].Val.(int64)) + } + if e.Field[f].Attr == dwarf.AttrName { + name = e.Field[f].Val.(string) + } + } + if name == "unsafe.Pointer" { + // The DWARF entry for unsafe.Pointer doesn't have a `kind` field + kind = uint(reflect.UnsafePointer) + } + return &ditypes.Parameter{ + Type: fmt.Sprintf("unsupported-%s", reflect.Kind(kind).String()), + Kind: kind, + NotCaptureReason: ditypes.Unsupported, + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/file_config_manager.go b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go new file mode 100644 index 0000000000000..3f495ee97c4e4 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go @@ -0,0 +1,230 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
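+
+// FileWatchingConfigManager (below) consumes a JSON file that decodes into
+// map[ServiceName]map[ProbeID]rcConfig. A minimal usage sketch, assuming such
+// a file exists at the given path (the path is illustrative only, and the
+// exact shape of rcConfig is defined elsewhere):
+//
+//	cm, err := NewFileConfigManager("/tmp/di_probe_configs.json")
+//	if err != nil {
+//		log.Error(err)
+//		return
+//	}
+//	defer cm.Stop()
+//	procs := cm.GetProcInfos() // current instrumentation state, keyed by PID
+//	_ = procs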
+ +//go:build linux_bpf + +package diconfig + +import ( + "encoding/json" + "fmt" + "reflect" + "sync" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/util" +) + +// FileWatchingConfigManager is used to track updates to a specified file +// which contains probe configurations +type FileWatchingConfigManager struct { + sync.Mutex + configTracker *configTracker + procTracker *proctracker.ProcessTracker + + callback configUpdateCallback + configs configsByService + state ditypes.DIProcs +} + +type fileConfigCallback func(configsByService) + +type configsByService = map[ditypes.ServiceName]map[ditypes.ProbeID]rcConfig + +// NewFileConfigManager creates a FileWatchingConfigManager set up to track +// the specified file. +func NewFileConfigManager(configFile string) (*FileWatchingConfigManager, error) { + cm := &FileWatchingConfigManager{ + callback: applyConfigUpdate, + } + + cm.procTracker = proctracker.NewProcessTracker(cm.updateProcessInfo) + err := cm.procTracker.Start() + if err != nil { + return nil, err + } + + cm.configTracker = newFileWatchingConfigTracker(configFile, cm.updateServiceConfigs) + err = cm.configTracker.Start() + if err != nil { + return nil, err + } + return cm, nil +} + +// GetProcInfos returns the state of the FileWatchingConfigManager +func (cm *FileWatchingConfigManager) GetProcInfos() ditypes.DIProcs { + return cm.state +} + +// Stop closes the config and proc trackers used by the FileWatchingConfigManager +func (cm *FileWatchingConfigManager) Stop() { + cm.configTracker.Stop() + cm.procTracker.Stop() +} + +func newFileWatchingConfigTracker(configFile string, onConfigUpdate fileConfigCallback) *configTracker { + ct := configTracker{ + ConfigPath: configFile, + configCallback: onConfigUpdate, + stopChannel: make(chan bool), + } + + return &ct +} + +// correlate this new configuration with a running service, +// and operate on the new global state of services/configs +// via cm.callback +func (cm *FileWatchingConfigManager) updateServiceConfigs(configs configsByService) { + log.Info("Updating config from file:", configs) + cm.configs = configs + err := cm.update() + if err != nil { + log.Info(err) + } +} + +func (cm *FileWatchingConfigManager) updateProcessInfo(procs ditypes.DIProcs) { + cm.Lock() + defer cm.Unlock() + log.Info("Updating procs", procs) + cm.configTracker.UpdateProcesses(procs) + err := cm.update() + if err != nil { + log.Info(err) + } +} + +type configTracker struct { + Processes map[ditypes.PID]*ditypes.ProcessInfo + ConfigPath string + configCallback fileConfigCallback + stopChannel chan bool +} + +func (ct *configTracker) Start() error { + fw := util.NewFileWatcher(ct.ConfigPath) + updateChan, err := fw.Watch() + if err != nil { + return fmt.Errorf("failed to watch config file %s: %s", ct.ConfigPath, err) + } + + go func(updateChan <-chan []byte) { + configUpdateLoop: + for { + select { + case rawConfigBytes := <-updateChan: + conf := map[string]map[string]rcConfig{} + err = json.Unmarshal(rawConfigBytes, &conf) + if err != nil { + log.Infof("invalid config read from %s: %s", ct.ConfigPath, err) + continue + } + ct.configCallback(conf) + case <-ct.stopChannel: + break configUpdateLoop + } + } + }(updateChan) + return nil +} + +func (ct *configTracker) Stop() { + ct.stopChannel <- true +} + +// UpdateProcesses is the callback interface 
that ConfigTracker uses to consume the map of ProcessInfo's +// such that it's used whenever there's an update to the state of known service processes on the machine. +// It simply overwrites the previous state of known service processes with the new one +func (ct *configTracker) UpdateProcesses(procs ditypes.DIProcs) { + current := procs + old := ct.Processes + if !reflect.DeepEqual(current, old) { + ct.Processes = current + } +} + +func (cm *FileWatchingConfigManager) update() error { + var updatedState = ditypes.NewDIProcs() + for serviceName, configsByID := range cm.configs { + for pid, proc := range cm.configTracker.Processes { + // If a config exists relevant to this proc + if proc.ServiceName == serviceName { + procCopy := *proc + updatedState[pid] = &procCopy + updatedState[pid].ProbesByID = convert(serviceName, configsByID) + } + } + } + + if !reflect.DeepEqual(cm.state, updatedState) { + err := inspectGoBinaries(updatedState) + if err != nil { + return err + } + + for pid, procInfo := range cm.state { + // cleanup dead procs + if _, running := updatedState[pid]; !running { + procInfo.CloseAllUprobeLinks() + delete(cm.state, pid) + } + } + + for pid, procInfo := range updatedState { + if _, tracked := cm.state[pid]; !tracked { + for _, probe := range procInfo.GetProbes() { + // install all probes from new process + cm.callback(procInfo, probe) + } + } else { + for _, existingProbe := range cm.state[pid].GetProbes() { + updatedProbe := procInfo.GetProbe(existingProbe.ID) + if updatedProbe == nil { + // delete old probes + cm.state[pid].DeleteProbe(existingProbe.ID) + } + } + for _, updatedProbe := range procInfo.GetProbes() { + existingProbe := cm.state[pid].GetProbe(updatedProbe.ID) + if !reflect.DeepEqual(existingProbe, updatedProbe) { + // update existing probes that changed + cm.callback(procInfo, updatedProbe) + } + } + } + } + cm.state = updatedState + } + return nil +} + +func convert(service string, configsByID map[ditypes.ProbeID]rcConfig) map[ditypes.ProbeID]*ditypes.Probe { + probesByID := map[ditypes.ProbeID]*ditypes.Probe{} + for id, config := range configsByID { + probesByID[id] = config.toProbe(service) + } + return probesByID +} + +func (rc *rcConfig) toProbe(service string) *ditypes.Probe { + return &ditypes.Probe{ + ID: rc.ID, + ServiceName: service, + FuncName: fmt.Sprintf("%s.%s", rc.Where.TypeName, rc.Where.MethodName), + InstrumentationInfo: &ditypes.InstrumentationInfo{ + InstrumentationOptions: &ditypes.InstrumentationOptions{ + CaptureParameters: ditypes.CaptureParameters, + ArgumentsMaxSize: ditypes.ArgumentsMaxSize, + StringMaxSize: ditypes.StringMaxSize, + MaxReferenceDepth: rc.Capture.MaxReferenceDepth, + }, + }, + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/analysis.go b/pkg/dynamicinstrumentation/ditypes/analysis.go new file mode 100644 index 0000000000000..e10ab9657c53e --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/analysis.go @@ -0,0 +1,105 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
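+
+// The FunctionsByPC and DeclaredFiles slices defined in this file are kept
+// sorted by descending LowPC (see the slices.SortFunc calls where these
+// slices are built). A minimal, illustrative lookup over that ordering
+// (not the resolution code used by the package) could look like:
+//
+//	func entryForPC(pc uint64, byPC []*LowPCEntry) *dwarf.Entry {
+//		for _, e := range byPC { // ordered from highest LowPC to lowest
+//			if e.LowPC <= pc {
+//				return e.Entry // first entry at or below pc owns it
+//			}
+//		}
+//		return nil
+//	}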
+ +//go:build linux_bpf + +package ditypes + +import ( + "debug/dwarf" + "fmt" +) + +// TypeMap contains all the information about functions and their parameters including +// functions that have been inlined in the binary +type TypeMap struct { + // Functions maps fully-qualified function names to a slice of its parameters + Functions map[string][]Parameter + + // InlinedFunctions maps program counters to a slice of dwarf entries used + // when resolving stack traces that include inlined functions + InlinedFunctions map[uint64][]*dwarf.Entry + + // FunctionsByPC places DWARF subprogram (function) entries in order by + // its low program counter which is necessary for resolving stack traces + FunctionsByPC []*LowPCEntry + + // DeclaredFiles places DWARF compile unit entries in order by its + // low program counter which is necessary for resolving declared file + // for the sake of stack traces + DeclaredFiles []*LowPCEntry +} + +// Parameter represents a function parameter as read from DWARF info +type Parameter struct { + Name string + ID string + Type string + TotalSize int64 + Kind uint + Location Location + NotCaptureReason NotCaptureReason + ParameterPieces []Parameter +} + +func (p Parameter) String() string { + return fmt.Sprintf("%s %s", p.Name, p.Type) +} + +// NotCaptureReason is used to convey why a parameter was not captured +type NotCaptureReason uint8 + +const ( + Unsupported NotCaptureReason = iota + 1 // Unsupported means the data type of the parameter is unsupported + FieldLimitReached // FieldLimitReached means the parameter wasn't captured because the data type has too many fields + CaptureDepthReached // CaptureDepthReached means the parameter wasn't captures because the data type has too many levels +) + +// SpecialKind is used for clarity in generated events that certain fields weren't read +type SpecialKind uint8 + +const ( + KindUnsupported = 255 - iota // KindUnsupported is for unsupported types + KindCutFieldLimit // KindCutFieldLimit is for fields that were cut because of field limit + KindCaptureDepthReached // KindCaptureDepthReached is for fields that were cut because of depth limit +) + +func (s SpecialKind) String() string { + switch s { + case KindUnsupported: + return "Unsupported" + case KindCutFieldLimit: + return "CutFieldLimit" + default: + return fmt.Sprintf("%d", s) + } +} + +// Location represents where a particular datatype is found on probe entry +type Location struct { + InReg bool + StackOffset int64 + Register int + NeedsDereference bool + PointerOffset uint64 +} + +func (l Location) String() string { + return fmt.Sprintf("Location{InReg: %t, StackOffset: %d, Register: %d}", l.InReg, l.StackOffset, l.Register) +} + +// LowPCEntry is a helper type used to sort DWARF entries by their low program counter +type LowPCEntry struct { + LowPC uint64 + Entry *dwarf.Entry +} + +// BPFProgram represents a bpf program that's created for a single probe +type BPFProgram struct { + ProgramText string + + // Used for bpf code generation + Probe *Probe +} diff --git a/pkg/dynamicinstrumentation/ditypes/config.go b/pkg/dynamicinstrumentation/ditypes/config.go new file mode 100644 index 0000000000000..06c0f826b33b7 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/config.go @@ -0,0 +1,337 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
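+
+// ParseConfigPath (defined below) expects remote-config paths of the form
+// datadog/<orgID>/<product>/<probeType>_<probeUUID>/<hash>. A sketch of its
+// use, with the example path taken from the accompanying tests:
+//
+//	cp, err := ParseConfigPath("datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51")
+//	if err != nil {
+//		log.Infof("bad config path: %s", err)
+//		return
+//	}
+//	// cp.OrgID == 2, cp.Product == "LIVE_DEBUGGING", cp.ProbeType == "logProbe"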
+ +//go:build linux_bpf + +// Package ditypes contains various datatypes and otherwise shared components +// used by all the packages in dynamic instrumentation +package ditypes + +import ( + "debug/dwarf" + "fmt" + "io" + "strconv" + "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/google/uuid" +) + +const ConfigBPFProbeID = "config" // ConfigBPFProbeID is the ID used for the config bpf program + +var ( + CaptureParameters = true // CaptureParameters is the default value for if probes should capture parameter values + ArgumentsMaxSize = 10000 // ArgumentsMaxSize is the default size in bytes of the output buffer used for param values + StringMaxSize = 512 // StringMaxSize is the default size in bytes of a single string + MaxReferenceDepth uint8 = 4 // MaxReferenceDepth is the default depth that DI will traverse datatypes for capturing values + MaxFieldCount = 20 // MaxFieldCount is the default limit for how many fields DI will capture in a single data type + SliceMaxSize = 1800 // SliceMaxSize is the default limit in bytes of a slice + SliceMaxLength = 100 // SliceMaxLength is the default limit in number of elements of a slice +) + +// ProbeID is the unique identifier for probes +type ProbeID = string + +// ServiceName is the unique identifier for a service +type ServiceName = string + +// PID stands for process ID +type PID = uint32 + +// DIProcs is the map that dynamic instrumentation uses for tracking processes and their relevant instrumentation info +type DIProcs map[PID]*ProcessInfo + +// NewDIProcs creates a new DIProcs map +func NewDIProcs() DIProcs { + return DIProcs{} +} + +// GetProbes returns the relevant probes information for a specific process +func (procs DIProcs) GetProbes(pid PID) []*Probe { + procInfo, ok := procs[pid] + if !ok { + return nil + } + return procInfo.GetProbes() +} + +// GetProbe returns the relevant probe information for a specific probe being instrumented +// in a specific process +func (procs DIProcs) GetProbe(pid PID, probeID ProbeID) *Probe { + procInfo, ok := procs[pid] + if !ok { + return nil + } + return procInfo.GetProbe(probeID) +} + +// SetProbe associates instrumentation information with a probe for a specific process +func (procs DIProcs) SetProbe(pid PID, service, typeName, method string, probeID, runtimeID uuid.UUID, opts *InstrumentationOptions) { + procInfo, ok := procs[pid] + if !ok { + return + } + probe := &Probe{ + ID: probeID.String(), + ServiceName: service, + FuncName: fmt.Sprintf("%s.%s", typeName, method), + InstrumentationInfo: &InstrumentationInfo{InstrumentationOptions: opts}, + } + + procInfo.ProbesByID[probeID.String()] = probe + // TODO: remove this from here + procInfo.RuntimeID = runtimeID.String() +} + +// DeleteProbe removes instrumentation for the specified probe +// in the specified process +func (procs DIProcs) DeleteProbe(pid PID, probeID ProbeID) { + procInfo, ok := procs[pid] + if !ok { + return + } + procInfo.DeleteProbe(probeID) +} + +// CloseUprobe closes the uprobe link for the specific probe (by ID) of +// a the specified process (by PID) +func (procs DIProcs) CloseUprobe(pid PID, probeID ProbeID) { + probe := procs.GetProbe(pid, probeID) + if probe == nil { + return + } + proc, ok := procs[pid] + if !ok || proc == nil { + log.Info("could not close uprobe, pid not found") + } + err := proc.CloseUprobeLink(probeID) + if err != nil { + log.Infof("could 
not close uprobe: %s", err) + } +} + +// SetRuntimeID sets the runtime ID for the specified process +func (procs DIProcs) SetRuntimeID(pid PID, runtimeID string) { + proc, ok := procs[pid] + if !ok || proc == nil { + log.Info("could not set runtime ID, pid not found") + } + proc.RuntimeID = runtimeID +} + +// ProcessInfo represents a process, it contains the information relevant to +// dynamic instrumentation for this specific process +type ProcessInfo struct { + PID uint32 + ServiceName string + RuntimeID string + BinaryPath string + + TypeMap *TypeMap + DwarfData *dwarf.Data + + ConfigurationUprobe *link.Link + ProbesByID ProbesByID + InstrumentationUprobes map[ProbeID]*link.Link + InstrumentationObjects map[ProbeID]*ebpf.Collection +} + +// SetupConfigUprobe sets the configuration probe for the process +func (pi *ProcessInfo) SetupConfigUprobe() (*ebpf.Map, error) { + configProbe, ok := pi.ProbesByID[ConfigBPFProbeID] + if !ok { + return nil, fmt.Errorf("config probe was not set for process %s", pi.ServiceName) + } + + configLink, ok := pi.InstrumentationUprobes[ConfigBPFProbeID] + if !ok { + return nil, fmt.Errorf("config uprobe was not set for process %s", pi.ServiceName) + } + pi.ConfigurationUprobe = configLink + delete(pi.InstrumentationUprobes, ConfigBPFProbeID) + + m, ok := pi.InstrumentationObjects[configProbe.ID].Maps["events"] + if !ok { + return nil, fmt.Errorf("config ringbuffer was not set for process %s", pi.ServiceName) + } + return m, nil +} + +// CloseConfigUprobe closes the uprobe connection for the configuration probe +func (pi *ProcessInfo) CloseConfigUprobe() error { + if pi.ConfigurationUprobe != nil { + return (*pi.ConfigurationUprobe).Close() + } + return nil +} + +// SetUprobeLink associates the uprobe link with the specified probe +// in the tracked process +func (pi *ProcessInfo) SetUprobeLink(probeID ProbeID, l *link.Link) { + pi.InstrumentationUprobes[probeID] = l +} + +// CloseUprobeLink closes the probe and deletes the link for the probe +// in the tracked process +func (pi *ProcessInfo) CloseUprobeLink(probeID ProbeID) error { + if l, ok := pi.InstrumentationUprobes[probeID]; ok { + err := (*l).Close() + delete(pi.InstrumentationUprobes, probeID) + return err + } + return nil +} + +// CloseAllUprobeLinks closes all probes and deletes their links for all probes +// in the tracked process +func (pi *ProcessInfo) CloseAllUprobeLinks() { + for probeID := range pi.InstrumentationUprobes { + if err := pi.CloseUprobeLink(probeID); err != nil { + log.Info("Failed to close uprobe link for probe", pi.BinaryPath, pi.PID, probeID, err) + } + } + err := pi.CloseConfigUprobe() + if err != nil { + log.Info("Failed to close config uprobe for process", pi.BinaryPath, pi.PID, err) + } +} + +// GetProbes returns references to each probe in the associated process +func (pi *ProcessInfo) GetProbes() []*Probe { + probes := make([]*Probe, 0, len(pi.ProbesByID)) + for _, probe := range pi.ProbesByID { + probes = append(probes, probe) + } + return probes +} + +// GetProbe returns a reference to the specified probe in the associated process +func (pi *ProcessInfo) GetProbe(probeID ProbeID) *Probe { + return pi.ProbesByID[probeID] +} + +// DeleteProbe closes the uprobe link and disassociates the probe in the associated process +func (pi *ProcessInfo) DeleteProbe(probeID ProbeID) { + err := pi.CloseUprobeLink(probeID) + if err != nil { + log.Infof("could not close uprobe link: %s", err) + } + delete(pi.ProbesByID, probeID) +} + +// ProbesByID maps probe IDs with probes +type 
ProbesByID = map[ProbeID]*Probe
+
+// FieldIdentifier is a tuple of struct names and field names
+type FieldIdentifier struct {
+	StructName, FieldName string
+}
+
+// InstrumentationInfo contains information used while setting up probes
+type InstrumentationInfo struct {
+	InstrumentationOptions *InstrumentationOptions
+
+	// BPFParametersSourceCode is the source code needed for capturing parameters via this probe
+	BPFParametersSourceCode string
+
+	// BPFSourceCode is the source code of the BPF program attached via this probe
+	BPFSourceCode string
+
+	// BPFObjectFileReader is the compiled BPF program attached via this probe
+	BPFObjectFileReader io.ReaderAt
+
+	ConfigurationHash string
+
+	// Toggle for whether or not the BPF object was rebuilt after changing parameters
+	AttemptedRebuild bool
+}
+
+// InstrumentationOptions is a set of options for how data should be captured by probes
+type InstrumentationOptions struct {
+	CaptureParameters bool
+	ArgumentsMaxSize  int
+	StringMaxSize     int
+	MaxReferenceDepth int
+	MaxFieldCount     int
+	SliceMaxSize      int
+	SliceMaxLength    int
+}
+
+// Probe represents a location in a GoProgram that can be instrumented
+// dynamically. It contains information about the service and the function
+// associated with the probe.
+type Probe struct {
+	// ID is a unique identifier for the probe.
+	ID string
+
+	// ServiceName is the name of the service in which the probe should be placed.
+	ServiceName string
+
+	// FuncName is the name of the function that triggers the probe.
+	FuncName string
+
+	InstrumentationInfo *InstrumentationInfo
+
+	RateLimiter *ratelimiter.SingleRateLimiter
+}
+
+// GetBPFFuncName cleans the function name to be allowed by the bpf compiler
+func (p *Probe) GetBPFFuncName() string {
+	// can't have '.', '-' or '/' in bpf program name
+	replacer := strings.NewReplacer(".", "_", "/", "_", "-", "_")
+	return replacer.Replace(p.FuncName)
+}
+
+// ConfigPath is a remote-config specific representation which is used for retrieving probe definitions
+type ConfigPath struct {
+	OrgID     int64
+	Product   string
+	ProbeType string
+	ProbeUUID uuid.UUID
+	Hash      string
+}
+
+// ParseConfigPath takes the remote-config specific string and parses a ConfigPath object out of it.
+// The string is expected to be of the form datadog/<orgID>/<product>/<probeType>_<probeUUID>/<hash>.
+func ParseConfigPath(str string) (*ConfigPath, error) {
+	parts := strings.Split(str, "/")
+	if len(parts) != 5 {
+		return nil, fmt.Errorf("failed to parse config path %s", str)
+	}
+	orgIDStr, product, probeIDStr, hash := parts[1], parts[2], parts[3], parts[4]
+	orgID, err := strconv.ParseInt(orgIDStr, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse orgID %s (from %s)", orgIDStr, str)
+	}
+	if product != "LIVE_DEBUGGING" {
+		return nil, fmt.Errorf("product %s not supported (from %s)", product, str)
+	}
+
+	typeAndID := strings.Split(probeIDStr, "_")
+	if len(typeAndID) != 2 {
+		return nil, fmt.Errorf("failed to parse probe type and UUID %s (from %s)", probeIDStr, str)
+	}
+	probeType, probeUUIDStr := typeAndID[0], typeAndID[1]
+	if probeType != "logProbe" {
+		return nil, fmt.Errorf("probe type %s not supported (from %s)", probeType, str)
+	}
+	probeUUID, err := uuid.Parse(probeUUIDStr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse probeUUID %s (from %s)", probeUUIDStr, str)
+	}
+
+	return &ConfigPath{
+		OrgID:     orgID,
+		Product:   product,
+		ProbeType: probeType,
+		ProbeUUID: probeUUID,
+		Hash:      hash,
+	}, nil
+}
diff --git a/pkg/dynamicinstrumentation/ditypes/config_test.go
b/pkg/dynamicinstrumentation/ditypes/config_test.go new file mode 100644 index 0000000000000..9fefba03063cd --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/config_test.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func TestParseConfigPath(t *testing.T) { + expectedUUID, err := uuid.Parse("f0b49f3e-8364-448d-97e9-3e640c4a21e6") + assert.NoError(t, err) + + configPath, err := ParseConfigPath("datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51") + assert.NoError(t, err) + + assert.NoError(t, err) + assert.Equal(t, int64(2), configPath.OrgID) + assert.Equal(t, "LIVE_DEBUGGING", configPath.Product) + assert.Equal(t, "logProbe", configPath.ProbeType) + assert.Equal(t, expectedUUID, configPath.ProbeUUID) + assert.Equal(t, "51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", configPath.Hash) +} + +func TestParseConfigPathErrors(t *testing.T) { + tcs := []string{ + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6", + "datadog/2/NOT_SUPPORTED/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + "datadog/2/LIVE_DEBUGGING/notSupported_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51/extra", + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-xxxx-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + } + for _, tc := range tcs { + _, err := ParseConfigPath(tc) + assert.Error(t, err) + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/diagnostics.go b/pkg/dynamicinstrumentation/ditypes/diagnostics.go new file mode 100644 index 0000000000000..d28764057b9ba --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/diagnostics.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
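+
+// A minimal sketch of how the DiagnosticUpload type defined in this file can
+// be populated (the service, runtime ID, probe ID, and error values are
+// illustrative):
+//
+//	d := DiagnosticUpload{Service: "my-service", DDSource: "dd_debugger"}
+//	d.Debugger.Diagnostic = Diagnostic{
+//		RuntimeID: "runtime-1234",
+//		ProbeID:   "f0b49f3e-8364-448d-97e9-3e640c4a21e6",
+//		Status:    StatusInstalled,
+//	}
+//	// On failure, record the error type and message instead:
+//	d.SetError("CompilationError", "could not compile the BPF program")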
+ +//go:build linux_bpf + +package ditypes + +// DiagnosticUpload is the message sent to the DataDog backend conveying diagnostic information +type DiagnosticUpload struct { + Service string `json:"service"` + DDSource string `json:"ddsource"` + + Debugger struct { + Diagnostic `json:"diagnostics"` + } `json:"debugger"` +} + +// SetError sets the error in the diagnostic upload +func (d *DiagnosticUpload) SetError(errorType, errorMessage string) { + d.Debugger.Diagnostic.Status = StatusError + d.Debugger.Diagnostic.DiagnosticException = &DiagnosticException{ + Type: errorType, + Message: errorMessage, + } +} + +// Status conveys the status of a probe +type Status string + +const ( + StatusReceived Status = "RECEIVED" // StatusReceived means the probe configuration was received + StatusInstalled Status = "INSTALLED" // StatusInstalled means the probe was installed + StatusEmitting Status = "EMITTING" // StatusEmitting means the probe is emitting events + StatusError Status = "ERROR" // StatusError means the probe has an issue +) + +// Diagnostic contains fields relevant for conveying the status of a probe +type Diagnostic struct { + RuntimeID string `json:"runtimeId"` + ProbeID string `json:"probeId"` + Status Status `json:"status"` + + *DiagnosticException `json:"exception,omitempty"` +} + +// DiagnosticException is used for diagnosing errors in probes +type DiagnosticException struct { + Type string `json:"type"` + Message string `json:"message"` +} diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf.go b/pkg/dynamicinstrumentation/ditypes/ebpf.go new file mode 100644 index 0000000000000..40bc53fdf9d28 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ebpf.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build ignore + +package ditypes + +/* +#include "../codegen/c/types.h" +*/ +import "C" + +type BaseEvent C.struct_base_event + +const SizeofBaseEvent = C.sizeof_struct_base_event diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go new file mode 100644 index 0000000000000..d76d6c39c2105 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go @@ -0,0 +1,13 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -I ../../network/ebpf/c -I ../../ebpf/c -fsigned-char ebpf.go + +package ditypes + +type BaseEvent struct { + Probe_id [304]byte + Pid uint32 + Uid uint32 + Program_counters [10]uint64 +} + +const SizeofBaseEvent = 0x188 diff --git a/pkg/dynamicinstrumentation/ditypes/ringbuffer.go b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go new file mode 100644 index 0000000000000..3e42586dc499c --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
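+
+// Every decoded event that comes off the shared ringbuffer is handed to an
+// EventCallback (defined below). An illustrative callback, assuming the
+// caller registers it with whichever component drains the ringbuffer:
+//
+//	var logEvent EventCallback = func(e *DIEvent) {
+//		fmt.Printf("probe %s fired in pid %d with %d captured arguments\n",
+//			e.ProbeID, e.PID, len(e.Argdata))
+//	}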
+ +//go:build linux_bpf + +package ditypes + +import "github.com/cilium/ebpf" + +// EventsRingbuffer is the shared ringbuffer which all bpf programs use for communication +// with userspace +var EventsRingbuffer *ebpf.Map + +// DIEvent represents a single invocation of a function and it's captured information +type DIEvent struct { + ProbeID string + PID uint32 + UID uint32 + Argdata []*Param + StackPCs []uint64 +} + +// Param is the representation of a single function parameter after being parsed from +// the raw byte buffer sent from bpf +type Param struct { + ValueStr string `json:",omitempty"` + Type string + Size uint16 + Kind byte + Fields []*Param `json:",omitempty"` +} + +// StackFrame represents a single entry in a stack trace +type StackFrame struct { + FileName string `json:"fileName,omitempty"` + Function string `json:"function,omitempty"` + Line int `json:"lineNumber,omitempty"` +} + +// EventCallback is the function that is called everytime a new event is created +type EventCallback func(*DIEvent) diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot.go b/pkg/dynamicinstrumentation/ditypes/snapshot.go new file mode 100644 index 0000000000000..44e7fcb35cdd2 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/snapshot.go @@ -0,0 +1,118 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "github.com/google/uuid" +) + +// SnapshotUpload is a single message sent to the datadog back containing the +// snapshot and metadata +type SnapshotUpload struct { + Service string `json:"service"` + Message string `json:"message"` + DDSource string `json:"ddsource"` + DDTags string `json:"ddtags"` + Logger struct { + Name string `json:"name"` + Method string `json:"method"` + Version int `json:"version,omitempty"` + ThreadID int `json:"thread_id,omitempty"` + ThreadName string `json:"thread_name,omitempty"` + } `json:"logger"` + + Debugger struct { + Snapshot `json:"snapshot"` + } `json:"debugger"` + + // TODO: check precision (ms, ns etc) + Duration int64 `json:"duration"` + + DD *TraceCorrelation `json:"dd,omitempty"` +} + +// Snapshot is a single instance of a function invocation and all +// captured data +type Snapshot struct { + ID *uuid.UUID `json:"id"` + Timestamp int64 `json:"timestamp"` + + Language string `json:"language"` + ProbeInSnapshot `json:"probe"` + + Captures `json:"captures"` + + Errors []EvaluationError `json:"evaluationErrors,omitempty"` + + Stack []StackFrame `json:"stack"` +} + +// Captures contains captured data at various points during a function invocation +type Captures struct { + Entry *Capture `json:"entry,omitempty"` + Return *Capture `json:"return,omitempty"` + + Lines map[string]Capture `json:"lines,omitempty"` +} + +// ProbeInSnapshot contains information about the probe that produced a snapshot +type ProbeInSnapshot struct { + ID string `json:"id"` + EvaluateAt string `json:"evaluateAt,omitempty"` + Tags string `json:"tags,omitempty"` + Version int `json:"version,omitempty"` + + ProbeLocation `json:"location"` +} + +// ProbeLocation represents where a snapshot was originally captured +type ProbeLocation struct { + Type string `json:"type,omitempty"` + Method string `json:"method,omitempty"` + Lines []string `json:"lines,omitempty"` + File string `json:"file,omitempty"` +} + +// CapturedValueMap maps 
type names to their values +type CapturedValueMap = map[string]*CapturedValue + +// Capture represents all the captured values in a snapshot +type Capture struct { + Arguments CapturedValueMap `json:"arguments,omitempty"` + Locals CapturedValueMap `json:"locals,omitempty"` +} + +// CapturedValue represents the value of a captured type +type CapturedValue struct { + Type string `json:"type"` + + // we use a string pointer so the empty string is marshalled + Value *string `json:"value,omitempty"` + + Fields map[string]*CapturedValue `json:"fields,omitempty"` + Entries [][]CapturedValue `json:"entries,omitempty"` + Elements []CapturedValue `json:"elements,omitempty"` + + NotCapturedReason string `json:"notCapturedReason,omitempty"` + IsNull bool `json:"isNull,omitempty"` + + Size string `json:"size,omitempty"` + Truncated bool `json:"truncated,omitempty"` +} + +// EvaluationError expresses why a value could not be evaluated +type EvaluationError struct { + Expr string `json:"expr"` + Message string `json:"message"` +} + +// TraceCorrelation contains fields that correlate a snapshot with traces +type TraceCorrelation struct { + TraceID string `json:"trace_id,omitempty"` + SpanID string `json:"span_id,omitempty"` +} diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot_test.go b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go new file mode 100644 index 0000000000000..e4496f4c6dc92 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go @@ -0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "encoding/json" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDynamicInstrumentationLogJSONRoundTrip(t *testing.T) { + files := []string{ + "testdata/snapshot-00.json", + "testdata/snapshot-01.json", + "testdata/snapshot-02.json", + } + for _, filePath := range files { + file, err := os.Open(filePath) + if err != nil { + t.Error(err) + } + defer file.Close() + + bytes, err := io.ReadAll(file) + if err != nil { + t.Error(err) + } + + var s SnapshotUpload + err = json.Unmarshal(bytes, &s) + if err != nil { + t.Error(err) + } + + mBytes, err := json.Marshal(s) + if err != nil { + t.Error(err) + } + + assert.JSONEq(t, string(bytes), string(mBytes)) + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json new file mode 100644 index 0000000000000..e92603672b4c6 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json @@ -0,0 +1,402 @@ +{ + "service": "debugger-backend-api-monitor", + "ddsource": "dd_debugger", + "message": "Log probe executed successfully", + "duration": 763602, + "ddtags": "tag:value", + "logger": { + "thread_id": 91, + "method": "emitSnapshot", + "thread_name": "scheduled-executor-thread-16", + "name": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "version": 2 + }, + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.emitSnapshot", + "lineNumber": 89 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.access$emitSnapshot", + 
"lineNumber": 31 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke", + "lineNumber": 63 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke", + "lineNumber": 55 + }, + { + "fileName": "MonitoredTask.kt", + "function": "com.datadog.debugger.MonitoredTask$ExecutionState.run", + "lineNumber": 30 + }, + { + "fileName": "MonitoredTask.kt", + "function": "com.datadog.debugger.MonitoredTask.run", + "lineNumber": 89 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.run", + "lineNumber": 55 + }, + { + "function": "com.datadog.debugger.apimonitor.$SnapshotReadAfterWriteMonitorJob$Definition$Exec.dispatch", + "lineNumber": -1 + }, + { + "fileName": "AbstractExecutableMethodsDefinition.java", + "function": "io.micronaut.context.AbstractExecutableMethodsDefinition$DispatchedExecutableMethod.invoke", + "lineNumber": 378 + }, + { + "fileName": "DelegatingExecutableMethod.java", + "function": "io.micronaut.inject.DelegatingExecutableMethod.invoke", + "lineNumber": 76 + }, + { + "fileName": "ScheduledMethodProcessor.java", + "function": "io.micronaut.scheduling.processor.ScheduledMethodProcessor.lambda$process$5", + "lineNumber": 127 + }, + { + "fileName": "Executors.java", + "function": "java.util.concurrent.Executors$RunnableAdapter.call", + "lineNumber": 577 + }, + { + "fileName": "FutureTask.java", + "function": "java.util.concurrent.FutureTask.runAndReset", + "lineNumber": 358 + }, + { + "fileName": "ScheduledThreadPoolExecutor.java", + "function": "java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run", + "lineNumber": 305 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor.runWorker", + "lineNumber": 1144 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run", + "lineNumber": 642 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.run", + "lineNumber": 1589 + } + ], + "captures": { + "entry": { + "arguments": { + "apiMonitorStr": { + "type": "java.lang.String", + "value": "red" + }, + "this": { + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "fields": { + "apiClient": { + "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient", + "fields": { + "converter": { + "type": "com.datadog.debugger.monitor.utils.JsonApiConverter", + "fields": { + "mapper": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ObjectMapper" + } + } + }, + "rcApiClient": { + "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient", + "fields": { + "apiClient": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted" + }, + "mcnultyJobConfig": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashMap" + }, + "secretManager": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.VaultSecretManager" + } + } + } + } + }, + "metrics": { + "type": "com.datadog.debugger.Metrics", + "fields": { + "statsd": { + "type": "com.timgroup.statsd.NonBlockingStatsDClient", + "fields": { + "clientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "handler": { + 
"notCapturedReason": "depth", + "type": "com.timgroup.statsd.NonBlockingStatsDClient$1" + }, + "constantTagsRendered": { + "type": "java.lang.String", + "value": "|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0" + }, + "statsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetryClientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "prefix": { + "type": "java.lang.String", + "value": "dd.debugger_backend_api_monitor." + }, + "telemetryStatsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "blocking": { + "type": "boolean", + "value": "false" + }, + "statsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "telemetryStatsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetry": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.Telemetry" + } + } + } + } + }, + "jobConfiguration": { + "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration", + "fields": { + "pollInterval": { + "type": "java.time.Duration", + "value": "PT1S" + }, + "orgId": { + "type": "long", + "value": "2" + }, + "timeout": { + "type": "java.time.Duration", + "value": "PT1M" + } + } + }, + "eventPlatformQueryClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl", + "fields": { + "httpClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted", + "fields": { + "$interceptors": { + "notCapturedReason": "depth", + "type": "io.micronaut.aop.Interceptor[][]" + }, + "$proxyMethods": { + "notCapturedReason": "depth", + "type": "io.micronaut.inject.ExecutableMethod[]" + } + } + } + } + } + } + }, + "apiMonitorInt": { + "type": "int", + "value": "86" + } + } + }, + "return": { + "arguments": { + "apiMonitorStr": { + "type": "java.lang.String", + "value": "red" + }, + "this": { + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "fields": { + "apiClient": { + "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient", + "fields": { + "converter": { + "type": "com.datadog.debugger.monitor.utils.JsonApiConverter", + "fields": { + "mapper": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ObjectMapper" + } + } + }, + "rcApiClient": { + "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient", + "fields": { + "apiClient": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted" + }, + "mcnultyJobConfig": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashMap" + }, + "secretManager": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.VaultSecretManager" + } + } + } + } + }, + "metrics": { + "type": "com.datadog.debugger.Metrics", + "fields": { + "statsd": { + "type": "com.timgroup.statsd.NonBlockingStatsDClient", + "fields": { + "clientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "handler": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.NonBlockingStatsDClient$1" + }, + "constantTagsRendered": { + "type": "java.lang.String", + "value": 
"|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0" + }, + "statsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetryClientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "prefix": { + "type": "java.lang.String", + "value": "dd.debugger_backend_api_monitor." + }, + "telemetryStatsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "blocking": { + "type": "boolean", + "value": "false" + }, + "statsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "telemetryStatsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetry": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.Telemetry" + } + } + } + } + }, + "jobConfiguration": { + "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration", + "fields": { + "pollInterval": { + "type": "java.time.Duration", + "value": "PT1S" + }, + "orgId": { + "type": "long", + "value": "2" + }, + "timeout": { + "type": "java.time.Duration", + "value": "PT1M" + } + } + }, + "eventPlatformQueryClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl", + "fields": { + "httpClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted", + "fields": { + "$interceptors": { + "notCapturedReason": "depth", + "type": "io.micronaut.aop.Interceptor[][]" + }, + "$proxyMethods": { + "notCapturedReason": "depth", + "type": "io.micronaut.inject.ExecutableMethod[]" + } + } + } + } + } + } + }, + "apiMonitorInt": { + "type": "int", + "value": "86" + } + }, + "locals": { + "uuid": { + "type": "java.lang.String", + "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb" + }, + "@return": { + "type": "java.lang.String", + "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb" + } + } + } + }, + "language": "java", + "id": "6e34e113-2bb3-44be-9330-79de17fab0fc", + "probe": { + "evaluateAt": "DEFAULT", + "location": { + "method": "emitSnapshot", + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob" + }, + "id": "59e78a5b-fa9a-4056-a2bf-a4384769d1ae", + "version": 1 + }, + "timestamp": 1676045474719 + } + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json new file mode 100644 index 0000000000000..2cbade7901259 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json @@ -0,0 +1,1660 @@ +{ + "service": "security-monitoring-entity-reducer", + "message": "Emitting entity with score", + "duration": 0, + "ddsource": "dd_debugger", + "ddtags": "env:prod,service:security-monitoring-entity-reducer", + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "EntityStatsOutput.java", + "function": "com.dd.logs.security_analytics.EntityStatsOutput.toByteString", + "lineNumber": 205 + }, + { + "fileName": "InternalIntakeReducerOutputEncoder.java", + "function": "com.dd.logs.rule_engine.outputs.internal_intake.InternalIntakeReducerOutputEncoder.encode", + "lineNumber": 31 + }, + { + "fileName": "InternalIntakeProducer.java", + "function": "com.dd.logs.internal_intake.producer.InternalIntakeProducer.processDatum", + "lineNumber": 116 + }, + { + "fileName": 
"WorkloadProcessor.java", + "function": "com.fsmatic.workload.WorkloadProcessor.onReceive", + "lineNumber": 332 + }, + { + "fileName": "AbstractActor.scala", + "function": "akka.actor.UntypedAbstractActor$$anonfun$receive$1.applyOrElse", + "lineNumber": 339 + }, + { + "fileName": "Actor.scala", + "function": "akka.actor.Actor.aroundReceive", + "lineNumber": 539 + }, + { + "fileName": "ActorSupport.java", + "function": "com.fsmatic.akka.ActorSupport.lambda$aroundReceive$0", + "lineNumber": 30 + }, + { + "function": "com.fsmatic.akka.ActorSupport$$Lambda/0x000000100176eaa8.accept", + "lineNumber": -1 + }, + { + "fileName": "MdcContextActor.java", + "function": "com.fsmatic.mdc.MdcContextActor.wrapReceive", + "lineNumber": 37 + }, + { + "fileName": "ActorSupport.java", + "function": "com.fsmatic.akka.ActorSupport.aroundReceive", + "lineNumber": 30 + }, + { + "fileName": "AActor.java", + "function": "com.fsmatic.akka.AActor.aroundReceive", + "lineNumber": 34 + }, + { + "fileName": "ActorCell.scala", + "function": "akka.actor.ActorCell.receiveMessage", + "lineNumber": 614 + }, + { + "fileName": "ActorCell.scala", + "function": "akka.actor.ActorCell.invoke", + "lineNumber": 583 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.processMailbox", + "lineNumber": 268 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.run", + "lineNumber": 229 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.exec", + "lineNumber": 241 + }, + { + "fileName": "ForkJoinTask.java", + "function": "akka.dispatch.forkjoin.ForkJoinTask.doExec", + "lineNumber": 260 + }, + { + "fileName": "ForkJoinPool.java", + "function": "akka.dispatch.forkjoin.ForkJoinPool$WorkQueue.runTask", + "lineNumber": 1339 + }, + { + "fileName": "ForkJoinPool.java", + "function": "akka.dispatch.forkjoin.ForkJoinPool.runWorker", + "lineNumber": 1979 + }, + { + "fileName": "ForkJoinWorkerThread.java", + "function": "akka.dispatch.forkjoin.ForkJoinWorkerThread.run", + "lineNumber": 107 + } + ], + "captures": { + "lines": { + "205": { + "arguments": { + "this": { + "type": "com.dd.logs.security_analytics.ImmutableEntityStatsOutput", + "fields": { + "projectionAttributes": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.RegularImmutableMap", + "type": "com.google.common.collect.RegularImmutableMap" + }, + "signalScore": { + "type": "long", + "value": "1" + }, + "logger": { + "type": "com.dd.logging.BasicLogger", + "fields": { + "metas": { + "isNull": true, + "type": "java.util.Map" + }, + "logger": { + "type": "ch.qos.logback.classic.Logger", + "fields": { + "parent": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.Logger" + }, + "level": { + "isNull": true, + "type": "ch.qos.logback.classic.Level" + }, + "name": { + "type": "java.lang.String", + "value": "com.dd.logs.security_analytics.EntityStatsOutput" + }, + "aai": { + "isNull": true, + "type": "ch.qos.logback.core.spi.AppenderAttachableImpl" + }, + "childrenList": { + "isNull": true, + "type": "java.util.List" + }, + "loggerContext": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.LoggerContext" + }, + "effectiveLevelInt": { + "type": "int", + "value": "20000" + }, + "additive": { + "type": "boolean", + "value": "true" + } + } + }, + "name": { + "type": "java.lang.String", + "value": "com.dd.logs.security_analytics.EntityStatsOutput" + } + } + }, + "count": { + "type": "long", + "value": "1" + }, + 
"projectionTags": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.SingletonImmutableBiMap", + "type": "com.google.common.collect.SingletonImmutableBiMap" + }, + "internalIntakeTimestamp": { + "type": "long", + "value": "1709233217857" + }, + "id": { + "type": "java.lang.String", + "value": "AY318mP5AAB-QSHUZlx-FQAA" + }, + "trackKey": { + "type": "com.dd.logs.Track$Key", + "fields": { + "type": { + "type": "com.dd.logs.TrackType", + "fields": { + "name": { + "type": "java.lang.String", + "value": "entitystat" + } + } + }, + "orgId": { + "type": "long", + "value": "2" + } + } + }, + "entity": { + "type": "com.dd.cloudsecurityplatform.pb.EntityOutput", + "fields": { + "id_": { + "type": "java.lang.String", + "value": "10.154.142.130" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "typeString_": { + "type": "java.lang.String", + "value": "ip_address" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + } + } + }, + "locals": { + "ipAttributes": { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues$Builder", + "fields": { + "unknownFieldsOrBuilder": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "size": "0", + "type": "java.util.TreeMap" + } + } + }, + "isClean": { + "type": "boolean", + "value": "false" + }, + "bitField0_": { + "type": "int", + "value": "0" + }, + "meAsParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "builderParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent" + } + } + }, + "outputBuilder": { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$Builder", + "fields": { + "entity_": { + "type": "com.dd.cloudsecurityplatform.pb.EntityOutput", + "fields": { + "id_": { + "type": "java.lang.String", + "value": "10.154.142.130" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "typeString_": { + "type": "java.lang.String", + "value": "ip_address" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + "stringProjections_": { + "type": "com.google.protobuf.MapField", + "fields": { + "mode": { + "type": "com.google.protobuf.MapField$StorageMode", + "value": "MAP" + }, + "mapData": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap", + "type": "com.google.protobuf.MapField$MutabilityAwareMap" + }, + "isMutable": { + "type": "boolean", + "value": "true" + }, + "listData": { + "isNull": true, + "type": "java.util.List" + }, + "converter": { + "type": 
"com.google.protobuf.MapField$ImmutableMessageConverter", + "fields": { + "defaultEntry": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.MapEntry" + } + } + } + } + }, + "threatIntelIndicatorsMatched_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "bitField0_": { + "type": "int", + "value": "54" + }, + "entityBuilder_": { + "isNull": true, + "type": "com.google.protobuf.SingleFieldBuilderV3" + }, + "threatIntelResults_": { + "size": "9", + "elements": [ + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.54.42" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "residential_proxy" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "spur" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "199.66.15.4" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "https://spur.us" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "suspicious" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "64.252.144.155" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": 
"com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "70.132.18.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.185.207" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.41.133" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.135.146" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + 
"value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "3.172.1.71" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.130.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + "type": "java.util.ArrayList" + }, + "builderParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent" + }, + "unknownFieldsOrBuilder": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "size": "0", + "type": "java.util.TreeMap" + } + } + }, + "geoIpMetadataBuilder_": { + "isNull": true, + "type": "com.google.protobuf.RepeatedFieldBuilderV3" + }, + "isClean": { + "type": "boolean", + "value": "false" + }, + "count_": { + "type": "long", + "value": "1" + }, + "tagProjections_": { + "type": "com.google.protobuf.MapField", + "fields": { + "mode": { + "type": "com.google.protobuf.MapField$StorageMode", + "value": "MAP" + }, + "mapData": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap", + "type": "com.google.protobuf.MapField$MutabilityAwareMap" + }, + "isMutable": { + "type": "boolean", + "value": "true" + }, + "listData": { + "isNull": true, + "type": "java.util.List" + }, + "converter": { + "type": "com.google.protobuf.MapField$ImmutableMessageConverter", + "fields": { + "defaultEntry": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.MapEntry" + } + } + } + } + }, + "geoIpMetadata_": { + "size": "0", + "type": "java.util.ArrayList" + }, + "ip_": { + "type": "java.lang.String", + "value": "" 
+ }, + "meAsParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl" + }, + "threatIntelResultsBuilder_": { + "isNull": true, + "type": "com.google.protobuf.RepeatedFieldBuilderV3" + }, + "hosts_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "signalScore_": { + "type": "long", + "value": "1" + } + } + }, + "geoIpMetadata": { + "size": "0", + "type": "java.util.ArrayList" + }, + "hosts": { + "size": "5", + "elements": [ + { + "type": "java.lang.String", + "value": "i-02d87409e6596f562" + }, + { + "type": "java.lang.String", + "value": "i-0f42b05f770544642" + }, + { + "type": "java.lang.String", + "value": "i-0ab705684278ad06b" + }, + { + "type": "java.lang.String", + "value": "i-0218eea919deb6e1a" + }, + { + "type": "java.lang.String", + "value": "i-0405eec023d49f192" + } + ], + "type": "java.util.ArrayList" + }, + "tagProjections": { + "entries": [ + [ + { + "type": "java.lang.String", + "value": "source" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ] + ], + "size": "1", + "type": "java.util.HashMap" + }, + "stringProjections": { + "entries": [ + [ + { + "type": "java.lang.String", + "value": "service" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + [ + { + "type": "java.lang.String", + "value": "custom.usr.id" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + 
"notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ] + ], + "size": "2", + "type": "java.util.HashMap" + }, + "threatIntelResults": { + "size": "9", + "elements": [ + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.54.42" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "residential_proxy" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "spur" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "199.66.15.4" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "https://spur.us" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "suspicious" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "64.252.144.155" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + 
"type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "70.132.18.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.185.207" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.41.133" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.135.146" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": 
"java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "3.172.1.71" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.130.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + "type": "java.util.ArrayList" + } + } + } + } + }, + "language": "java", + "id": "d7141999-c5bd-4887-b855-66c7a4dbb9a4", + "probe": { + "location": { + "file": "domains/cloud-security-platform/apps/security-monitoring-entity-reducer/src/main/java/com/dd/logs/security_analytics/EntityStatsOutput.java", + "method": "toByteString", + "lines": [ + "205" + ], + "type": "com.dd.logs.security_analytics.EntityStatsOutput" + }, + "id": "13da639f-2b81-475c-9366-5aa227a07302", + "version": 1 + }, + "timestamp": 1709233217858 + } + }, + "logger": { + "thread_id": 120, + "method": "toByteString", + "thread_name": "FsmaticDataCluster-fsmatic.workload.default.work-dispatcher-42", + "name": "com.dd.logs.security_analytics.EntityStatsOutput", + "version": 2 + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json new file mode 100644 index 0000000000000..2f65ebd33f8fb --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json @@ -0,0 +1,1645 @@ +{ + "service": "logs-intake-coordinator", + "message": 
"MetricsClient.parseSuccess response={Cannot find symbol: response} returns {status=ok, resType=time_series, series=..., fromDate=1709233490000, toDate=1709233550000}, ...", + "ddsource": "dd_debugger", + "duration": 2568054, + "ddtags": "git.commit.sha:3698e1f3da2142d6399ef311e80c970e8e89eb02,app:logs-intake-coordinator", + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "MetricsClient.java", + "function": "com.dd.logs.metricsclient.MetricsClient.parseSuccess", + "lineNumber": 16 + }, + { + "fileName": "AHttpServiceCall.java", + "function": "com.fsmatic.http.AHttpServiceCall.parseResponse", + "lineNumber": 203 + }, + { + "fileName": "AHttpServiceCall.java", + "function": "com.fsmatic.http.AHttpServiceCall$HttpCall.lambda$execute$1", + "lineNumber": 389 + }, + { + "function": "com.fsmatic.http.AHttpServiceCall$HttpCall$$Lambda/0x00007f8843a54460.apply", + "lineNumber": -1 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture$UniApply.tryFire", + "lineNumber": 646 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture.postComplete", + "lineNumber": 510 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture.complete", + "lineNumber": 2179 + }, + { + "fileName": "CompletableCallback.java", + "function": "com.fsmatic.http.CompletableCallback.onResponse", + "lineNumber": 55 + }, + { + "fileName": "RealCall.java", + "function": "okhttp3.RealCall$AsyncCall.execute", + "lineNumber": 174 + }, + { + "fileName": "NamedRunnable.java", + "function": "okhttp3.internal.NamedRunnable.run", + "lineNumber": 32 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor.runWorker", + "lineNumber": 1144 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run", + "lineNumber": 642 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.runWith", + "lineNumber": 1596 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.run", + "lineNumber": 1583 + } + ], + "captures": { + "return": { + "arguments": { + "p0": { + "type": "okhttp3.Response", + "fields": { + "request": { + "type": "okhttp3.Request", + "fields": { + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "notCapturedReason": "depth", + "type": "java.lang.String[]" + } + } + }, + "method": { + "type": "java.lang.String", + "value": "GET" + }, + "body": { + "isNull": true, + "type": "okhttp3.RequestBody" + }, + "url": { + "type": "okhttp3.HttpUrl", + "fields": { + "password": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "fragment": { + "isNull": true, + "type": "java.lang.String" + }, + "scheme": { + "type": "java.lang.String", + "value": "https" + }, + "$$DD$source": { + "isNull": true, + "type": "datadog.trace.api.iast.Taintable$Source" + }, + "port": { + "type": "int", + "value": "443" + }, + "queryNamesAndValues": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "host": { + "type": "java.lang.String", + "value": "api.datad0g.com" + }, + "pathSegments": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "url": { + "size": "663", + "truncated": true, + "type": "java.lang.String", + "value": 
"https://api.datad0g.com/api/v1/query?query=timeshift%28%28max%3Akubernetes_state.statefulset.replicas_desired%7Bdatacenter%3Aus1.staging.dog%2Cdds%3Aevent-platform%2Cservice%3Alogs-intake-backend%7D%20by%20%7Bkube_stateful_set%7D%20-%20max%3Akubernetes_st" + }, + "username": { + "type": "java.lang.String", + "value": "" + } + } + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "tags": { + "size": "0", + "type": "java.util.Collections$EmptyMap" + } + } + }, + "handshake": { + "type": "okhttp3.Handshake", + "fields": { + "localCertificates": { + "size": "0", + "type": "java.util.Collections$EmptyList" + }, + "peerCertificates": { + "size": "2", + "elements": [ + { + "notCapturedReason": "depth", + "type": "sun.security.x509.X509CertImpl" + }, + { + "notCapturedReason": "depth", + "type": "sun.security.x509.X509CertImpl" + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "tlsVersion": { + "type": "okhttp3.TlsVersion", + "value": "TLS_1_2" + }, + "cipherSuite": { + "type": "okhttp3.CipherSuite", + "fields": { + "javaName": { + "type": "java.lang.String", + "value": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + } + } + } + } + }, + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "size": "26", + "elements": [ + { + "type": "java.lang.String", + "value": "date" + }, + { + "type": "java.lang.String", + "value": "Thu, 29 Feb 2024 19:05:52 GMT" + }, + { + "type": "java.lang.String", + "value": "content-type" + }, + { + "type": "java.lang.String", + "value": "application/json" + }, + { + "type": "java.lang.String", + "value": "x-frame-options" + }, + { + "type": "java.lang.String", + "value": "SAMEORIGIN" + }, + { + "type": "java.lang.String", + "value": "content-security-policy" + }, + { + "type": "java.lang.String", + "value": "frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pub293163a918901030b79492fe1ab424cf&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatad0g.com" + }, + { + "type": "java.lang.String", + "value": "vary" + }, + { + "type": "java.lang.String", + "value": "Accept-Encoding" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-limit" + }, + { + "type": "java.lang.String", + "value": "100" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-period" + }, + { + "type": "java.lang.String", + "value": "10" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-remaining" + }, + { + "type": "java.lang.String", + "value": "96" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-reset" + }, + { + "type": "java.lang.String", + "value": "10" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-name" + }, + { + "type": "java.lang.String", + "value": "batch_query" + }, + { + "type": "java.lang.String", + "value": "x-content-type-options" + }, + { + "type": "java.lang.String", + "value": "nosniff" + }, + { + "type": "java.lang.String", + "value": "strict-transport-security" + }, + { + "type": "java.lang.String", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "type": "java.lang.String", + "value": "transfer-encoding" + }, + { + "type": "java.lang.String", + "value": "chunked" + } + ], + "type": "java.lang.String[]" + } + } + }, + "code": { + "type": "int", + "value": "200" + }, + "sentRequestAtMillis": { + "type": "long", + "value": "1709233550915" + }, + "networkResponse": { + "type": "okhttp3.Response", + "fields": { + "request": { + "type": 
"okhttp3.Request", + "fields": { + "headers": { + "notCapturedReason": "depth", + "type": "okhttp3.Headers" + }, + "method": { + "type": "java.lang.String", + "value": "GET" + }, + "body": { + "isNull": true, + "type": "okhttp3.RequestBody" + }, + "url": { + "notCapturedReason": "depth", + "type": "okhttp3.HttpUrl" + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "tags": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyMap" + } + } + }, + "handshake": { + "type": "okhttp3.Handshake", + "fields": { + "localCertificates": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "peerCertificates": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "tlsVersion": { + "type": "okhttp3.TlsVersion", + "value": "TLS_1_2" + }, + "cipherSuite": { + "notCapturedReason": "depth", + "type": "okhttp3.CipherSuite" + } + } + }, + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "notCapturedReason": "depth", + "type": "java.lang.String[]" + } + } + }, + "code": { + "type": "int", + "value": "200" + }, + "sentRequestAtMillis": { + "type": "long", + "value": "1709233550915" + }, + "networkResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "message": { + "type": "java.lang.String", + "value": "OK" + }, + "body": { + "isNull": true, + "type": "okhttp3.ResponseBody" + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "cacheResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "protocol": { + "type": "okhttp3.Protocol", + "value": "HTTP_1_1" + }, + "priorResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "receivedResponseAtMillis": { + "type": "long", + "value": "1709233552199" + }, + "exchange": { + "type": "okhttp3.internal.connection.Exchange", + "fields": { + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "codec": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.http1.Http1ExchangeCodec" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "duplex": { + "type": "boolean", + "value": "false" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + }, + "finder": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.ExchangeFinder" + } + } + } + } + }, + "message": { + "type": "java.lang.String", + "value": "OK" + }, + "body": { + "type": "okhttp3.internal.http.RealResponseBody", + "fields": { + "reader": { + "type": "okhttp3.ResponseBody$BomAwareReader", + "fields": { + "delegate": { + "notCapturedReason": "depth", + "type": "java.io.InputStreamReader" + }, + "charset": { + "notCapturedReason": "depth", + "type": "sun.nio.cs.UTF_8" + }, + "skipBuffer": { + "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field private char[] java.io.Reader.skipBuffer accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521", + "type": "char[]" + }, + "closed": { + "type": "boolean", + "value": "true" + }, + "lock": { + "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field protected java.lang.Object java.io.Reader.lock accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521", + "type": "java.lang.Object" + }, + "source": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSource" + } 
+ } + }, + "contentTypeString": { + "type": "java.lang.String", + "value": "application/json" + }, + "contentLength": { + "type": "long", + "value": "-1" + }, + "source": { + "type": "okio.RealBufferedSource", + "fields": { + "closed": { + "type": "boolean", + "value": "true" + }, + "buffer": { + "notCapturedReason": "depth", + "type": "okio.Buffer" + }, + "source": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpMonitoringInterceptor$LengthTrackingSource" + } + } + } + } + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "cacheResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "protocol": { + "type": "okhttp3.Protocol", + "value": "HTTP_1_1" + }, + "priorResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "receivedResponseAtMillis": { + "type": "long", + "value": "1709233552199" + }, + "exchange": { + "type": "okhttp3.internal.connection.Exchange", + "fields": { + "call": { + "type": "okhttp3.RealCall", + "fields": { + "originalRequest": { + "notCapturedReason": "depth", + "type": "okhttp3.Request" + }, + "forWebSocket": { + "type": "boolean", + "value": "false" + }, + "client": { + "notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "executed": { + "type": "boolean", + "value": "true" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + } + } + }, + "codec": { + "type": "okhttp3.internal.http1.Http1ExchangeCodec", + "fields": { + "sink": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSink" + }, + "client": { + "notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "realConnection": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnection" + }, + "headerLimit": { + "type": "long", + "value": "261503" + }, + "source": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSource" + }, + "state": { + "type": "int", + "value": "6" + }, + "trailers": { + "notCapturedReason": "depth", + "type": "okhttp3.Headers" + } + } + }, + "eventListener": { + "type": "com.fsmatic.http.OkHttpEventLogger", + "fields": { + "loggers": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + }, + "hostRegexps": { + "notCapturedReason": "depth", + "type": "java.util.ArrayList" + } + } + }, + "duplex": { + "type": "boolean", + "value": "false" + }, + "transmitter": { + "type": "okhttp3.internal.connection.Transmitter", + "fields": { + "request": { + "notCapturedReason": "depth", + "type": "okhttp3.Request" + }, + "noMoreExchanges": { + "type": "boolean", + "value": "true" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnectionPool" + }, + "callStackTrace": { + "isNull": true, + "type": "java.lang.Object" + }, + "timeoutEarlyExit": { + "type": "boolean", + "value": "false" + }, + "exchangeResponseDone": { + "type": "boolean", + "value": "true" + }, + "timeout": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter$1" + }, + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "canceled": { + "type": "boolean", + "value": "false" + }, + "exchangeRequestDone": { + "type": "boolean", + "value": "true" + }, + "exchangeFinder": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.ExchangeFinder" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "client": { + 
"notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "connection": { + "isNull": true, + "type": "okhttp3.internal.connection.RealConnection" + }, + "exchange": { + "isNull": true, + "type": "okhttp3.internal.connection.Exchange" + } + } + }, + "finder": { + "type": "okhttp3.internal.connection.ExchangeFinder", + "fields": { + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "address": { + "notCapturedReason": "depth", + "type": "okhttp3.Address" + }, + "connectingConnection": { + "isNull": true, + "type": "okhttp3.internal.connection.RealConnection" + }, + "routeSelector": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RouteSelector" + }, + "hasStreamFailure": { + "type": "boolean", + "value": "false" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnectionPool" + }, + "routeSelection": { + "isNull": true, + "type": "okhttp3.internal.connection.RouteSelector$Selection" + }, + "nextRouteToTry": { + "isNull": true, + "type": "okhttp3.Route" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + } + } + } + } + } + } + }, + "p1": { + "type": "com.dd.logs.metricsclient.MetricsClientActions$QueryMetric", + "fields": { + "query": { + "size": "492", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor" + }, + "notCritical": { + "type": "boolean", + "value": "false" + }, + "from": { + "type": "java.time.Instant", + "value": "2024-02-29T19:04:50.915679003Z" + }, + "to": { + "type": "java.time.Instant", + "value": "2024-02-29T19:05:50.915679003Z" + } + } + }, + "this": { + "type": "com.dd.logs.metricsclient.MetricsClient", + "fields": { + "shouldTrace": { + "type": "boolean", + "value": "true" + }, + "apiKey": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "throttledLogger": { + "type": "com.dd.logging.ThrottledLogger", + "fields": { + "logger": { + "type": "com.dd.logging.BasicLogger", + "fields": { + "metas": { + "isNull": true, + "type": "java.util.Map" + }, + "logger": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.Logger" + }, + "name": { + "type": "java.lang.String", + "value": "com.fsmatic.http.AHttpServiceCall" + } + } + }, + "throttler": { + "type": "com.dd.logging.throttler.ByPassThrottler", + "fields": { + "current": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.atomic.AtomicReference" + }, + "rate": { + "notCapturedReason": "depth", + "type": "com.dd.logging.throttler.ByPassThrottler$Rate" + }, + "clock": { + "notCapturedReason": "depth", + "type": "java.time.Clock$SystemClock" + } + } + } + } + }, + "executor": { + "type": "com.fsmatic.rpc.RPCCallExecutor", + "fields": { + "policy": { + "type": "com.fsmatic.rpc.RPCCallExecutor$Policy$NoOp" + } + } + }, + "applicationKey": { + "type": "java.lang.String", + "value": "" + }, + "wrapper": { + "type": "com.fsmatic.http.HttpWrapper", + "fields": { + "shouldTrace": { + "type": "boolean", + "value": "true" + }, + "clientInstrumentation": { + "type": "com.fsmatic.http.OkHttpClientInstrumentation", + "fields": { + 
"dispatcherInstrumentation": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpDispatcherInstrumentation" + } + } + }, + "client": { + "notCapturedReason": "fieldCount", + "type": "okhttp3.OkHttpClient", + "fields": { + "cache": { + "isNull": true, + "type": "okhttp3.Cache" + }, + "socketFactory": { + "notCapturedReason": "depth", + "type": "javax.net.DefaultSocketFactory" + }, + "internalCache": { + "isNull": true, + "type": "okhttp3.internal.cache.InternalCache" + }, + "hostnameVerifier": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.tls.OkHostnameVerifier" + }, + "dns": { + "notCapturedReason": "depth", + "type": "okhttp3.Dns$$Lambda/0x00007f8843706ac8" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.ConnectionPool" + }, + "certificateChainCleaner": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.tls.BasicCertificateChainCleaner" + }, + "certificatePinner": { + "notCapturedReason": "depth", + "type": "okhttp3.CertificatePinner" + }, + "cookieJar": { + "notCapturedReason": "depth", + "type": "okhttp3.CookieJar$1" + }, + "connectionSpecs": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "networkInterceptors": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "proxySelector": { + "notCapturedReason": "depth", + "type": "sun.net.spi.DefaultProxySelector" + }, + "proxy": { + "isNull": true, + "type": "java.net.Proxy" + }, + "sslSocketFactory": { + "notCapturedReason": "depth", + "type": "sun.security.ssl.SSLSocketFactoryImpl" + }, + "eventListenerFactory": { + "notCapturedReason": "depth", + "type": "okhttp3.EventListener$$Lambda/0x00007f88437051a8" + }, + "proxyAuthenticator": { + "notCapturedReason": "depth", + "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8" + }, + "protocols": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "dispatcher": { + "notCapturedReason": "depth", + "type": "okhttp3.Dispatcher" + }, + "authenticator": { + "notCapturedReason": "depth", + "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8" + }, + "interceptors": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + } + } + }, + "mapper": { + "type": "com.fasterxml.jackson.databind.ObjectMapper", + "fields": { + "_serializerFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ser.BeanSerializerFactory" + }, + "_deserializationContext": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.deser.DefaultDeserializationContext$Impl" + }, + "_deserializationConfig": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.DeserializationConfig" + }, + "_injectableValues": { + "isNull": true, + "type": "com.fasterxml.jackson.databind.InjectableValues" + }, + "_registeredModuleTypes": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashSet" + }, + "_jsonFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.MappingJsonFactory" + }, + "_coercionConfigs": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.cfg.CoercionConfigs" + }, + "_subtypeResolver": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.jsontype.impl.StdSubtypeResolver" + }, + "_configOverrides": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.cfg.ConfigOverrides" + 
}, + "_serializerProvider": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ser.DefaultSerializerProvider$Impl" + }, + "_serializationConfig": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.SerializationConfig" + }, + "_mixIns": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.introspect.SimpleMixInResolver" + }, + "_typeFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.type.TypeFactory" + }, + "_rootDeserializers": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + }, + "metrics": { + "type": "com.dd.metrics.WeakRefMetricsCache", + "fields": { + "cache": { + "notCapturedReason": "depth", + "type": "com.dd.metrics.WeakRefDoubleCache" + }, + "rootRegistry": { + "notCapturedReason": "depth", + "type": "com.dd.metrics.RootMetricRegistry" + } + } + }, + "internalHttpPort": { + "type": "int", + "value": "9091" + } + } + }, + "metrics": { + "type": "com.dd.metrics.WeakRefMetricsCache", + "fields": { + "cache": { + "type": "com.dd.metrics.WeakRefDoubleCache", + "fields": { + "layer1": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + }, + "rootRegistry": { + "type": "com.dd.metrics.RootMetricRegistry", + "fields": { + "metrics": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + } + } + }, + "uri": { + "type": "okhttp3.HttpUrl", + "fields": { + "password": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "fragment": { + "isNull": true, + "type": "java.lang.String" + }, + "scheme": { + "type": "java.lang.String", + "value": "https" + }, + "$$DD$source": { + "isNull": true, + "type": "datadog.trace.api.iast.Taintable$Source" + }, + "port": { + "type": "int", + "value": "443" + }, + "queryNamesAndValues": { + "isNull": true, + "type": "java.util.List" + }, + "host": { + "type": "java.lang.String", + "value": "api.datad0g.com" + }, + "pathSegments": { + "size": "3", + "elements": [ + { + "type": "java.lang.String", + "value": "api" + }, + { + "type": "java.lang.String", + "value": "v1" + }, + { + "type": "java.lang.String", + "value": "query" + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "url": { + "type": "java.lang.String", + "value": "https://api.datad0g.com/api/v1/query" + }, + "username": { + "type": "java.lang.String", + "value": "" + } + } + }, + "timeout": { + "type": "long", + "value": "60000" + }, + "tags": { + "type": "com.dd.metrics.Tags", + "fields": { + "hashIsZero": { + "type": "boolean", + "value": "false" + }, + "hash": { + "type": "int", + "value": "0" + }, + "tags": { + "size": "1", + "elements": [ + { + "type": "java.lang.String", + "value": "action_name:metricsclient" + } + ], + "type": "java.util.ArrayList" + } + } + } + } + } + }, + "locals": { + "@return": { + "type": "com.dd.logs.metricsclient.ImmutableQueryResponse", + "fields": { + "fromDate": { + "type": "long", + "value": "1709233490000" + }, + "resType": { + "type": "java.lang.String", + "value": "time_series" + }, + "series": { + "size": "9", + "elements": [ + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "577", + "truncated": true, + "type": "java.lang.String", + "value": 
"timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-internal-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_s" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-internal-all,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "556", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-spans,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-spans,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "550", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-tcp,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i" + }, + "metric": { + "type": "java.lang.String", + "value": 
"timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-tcp,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "562", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-testing,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-testing,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "559", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-upload,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:log" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": 
"timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-upload,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "550", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "574", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-datadog,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_se" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": 
"datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-datadog,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "562", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-rum,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-rum,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "553", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-logs,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-logs,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + 
"interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "toDate": { + "type": "long", + "value": "1709233550000" + }, + "query": { + "size": "492", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor" + }, + "groupBy": { + "size": "1", + "elements": [ + { + "type": "java.lang.String", + "value": "kube_stateful_set" + } + ], + "type": "java.util.Collections$SingletonList" + }, + "message": { + "type": "java.lang.String", + "value": "" + }, + "status": { + "type": "java.lang.String", + "value": "ok" + } + } + } + } + } + }, + "language": "java", + "id": "97775bd9-ca14-4192-8c15-21a177819305", + "evaluationErrors": [ + { + "expr": "response", + "message": "Cannot find symbol: response" + } + ], + "probe": { + "location": { + "method": "parseSuccess", + "type": "com.dd.logs.metricsclient.MetricsClient" + }, + "id": "23a08460-521f-4364-aff5-081221aba86d", + "version": 3 + }, + "timestamp": 1709233552203 + } + }, + "logger": { + "thread_id": 18170, + "method": "parseSuccess", + "thread_name": "OkHttp https://api.datad0g.com/...", + "name": "com.dd.logs.metricsclient.MetricsClient", + "version": 2 + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ebpf/ebpf.go b/pkg/dynamicinstrumentation/ebpf/ebpf.go new file mode 100644 index 0000000000000..177dcc2146f17 --- /dev/null +++ b/pkg/dynamicinstrumentation/ebpf/ebpf.go @@ -0,0 +1,174 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +// Package ebpf provides utility for setting up and instrumenting the bpf code +// used by dynamic instrumentation +package ebpf + +import ( + "errors" + "fmt" + "io" + "text/template" + "time" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// SetupEventsMap creates the ringbuffer which all programs will use for sending output +func SetupEventsMap() error { + var err error + events, err := ebpf.NewMap(&ebpf.MapSpec{ + Name: "events", + Type: ebpf.RingBuf, + MaxEntries: 1 << 24, + }) + if err != nil { + return fmt.Errorf("could not create bpf map for sharing events with userspace: %w", err) + } + ditypes.EventsRingbuffer = events + return nil +} + +// AttachBPFUprobe attaches the probe to the specified process +func AttachBPFUprobe(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + executable, err := link.OpenExecutable(procInfo.BinaryPath) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not open proc executable for attaching bpf probe: %w", err) + } + + spec, err := ebpf.LoadCollectionSpecFromReader(probe.InstrumentationInfo.BPFObjectFileReader) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not create bpf collection for probe %s: %w", probe.ID, err) + } + + mapReplacements := map[string]*ebpf.Map{} + if probe.ID != ditypes.ConfigBPFProbeID { + // config probe is special and should not be on the same ringbuffer + // as the rest of regular events. Despite having the same "events" name, + // not using the pinned map means the config program uses a different + // ringbuffer. 
+ mapReplacements["events"] = ditypes.EventsRingbuffer + } else { + configEvents, err := ebpf.NewMap(&ebpf.MapSpec{ + Type: ebpf.RingBuf, + MaxEntries: 1 << 24, + }) + if err != nil { + return fmt.Errorf("could not create bpf map for receiving probe configurations: %w", err) + } + mapReplacements["events"] = configEvents + } + + // Load the ebpf object + opts := ebpf.CollectionOptions{ + MapReplacements: mapReplacements, + } + + bpfObject, err := ebpf.NewCollectionWithOptions(spec, opts) + if err != nil { + var ve *ebpf.VerifierError + if errors.As(err, &ve) { + log.Infof("Verifier error: %+v\n", ve) + } + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not load bpf collection for probe %s: %w", probe.ID, err) + } + + procInfo.InstrumentationObjects[probe.ID] = bpfObject + + // Populate map used for zero'ing out regions of memory + zeroValMap, ok := bpfObject.Maps["zeroval"] + if !ok { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not find bpf map for zero value") + return fmt.Errorf("could not find bpf map for zero value in bpf object") + } + + var zeroSlice = make([]uint8, probe.InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize) + var index uint32 + err = zeroValMap.Update(index, zeroSlice, 0) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not find bpf map for zero value") + return fmt.Errorf("could not use bpf map for zero value in bpf object: %w", err) + } + + // Attach BPF probe to function in executable + bpfProgram, ok := bpfObject.Programs[probe.GetBPFFuncName()] + if !ok { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", fmt.Sprintf("couldn't find bpf program for symbol %s", probe.FuncName)) + return fmt.Errorf("could not find bpf program for symbol %s", probe.FuncName) + } + + link, err := executable.Uprobe(probe.FuncName, bpfProgram, &link.UprobeOptions{ + PID: int(procInfo.PID), + }) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "UPROBE_FAILURE", err.Error()) + return fmt.Errorf("could not attach bpf program via uprobe: %w", err) + } + + procInfo.SetUprobeLink(probe.ID, &link) + diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, ditypes.StatusInstalled) + + return nil +} + +// CompileBPFProgram compiles the code for a single probe associated with the process given by procInfo +func CompileBPFProgram(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + f := func(in io.Reader, out io.Writer) error { + fileContents, err := io.ReadAll(in) + if err != nil { + return err + } + programTemplate, err := template.New("program_template").Parse(string(fileContents)) + if err != nil { + return err + } + err = programTemplate.Execute(out, probe) + if err != nil { + return err + } + return nil + } + + cfg := ddebpf.NewConfig() + opts := runtime.CompileOptions{ + AdditionalFlags: getCFlags(cfg), + ModifyCallback: f, + UseKernelHeaders: true, + } + compiledOutput, err := runtime.Dynamicinstrumentation.CompileWithOptions(cfg, opts) + if err != nil { + return err + } + probe.InstrumentationInfo.BPFObjectFileReader = compiledOutput + return nil +} + +func getCFlags(config *ddebpf.Config) []string { + cflags := []string{ + "-g", + "-Wno-unused-variable", + } + if config.BPFDebug { + cflags = 
append(cflags, "-DDEBUG=1") + } + return cflags +} + +const ( + compilationStepTimeout = 60 * time.Second +) diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser.go b/pkg/dynamicinstrumentation/eventparser/event_parser.go new file mode 100644 index 0000000000000..952a1bb435cfd --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/event_parser.go @@ -0,0 +1,268 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package eventparser is used for parsing raw bytes from bpf code into events +package eventparser + +import ( + "encoding/binary" + "fmt" + "reflect" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" +) + +// MaxBufferSize is the maximum size of the output buffer from bpf which is read by this package +const MaxBufferSize = 10000 + +var ( + byteOrder = binary.LittleEndian +) + +// ParseEvent takes the raw buffer from bpf and parses it into an event. It also potentially +// applies a rate limit +func ParseEvent(record []byte, ratelimiters *ratelimiter.MultiProbeRateLimiter) *ditypes.DIEvent { + event := ditypes.DIEvent{} + + if len(record) < ditypes.SizeofBaseEvent { + log.Tracef("malformed event record (length %d)", len(record)) + return nil + } + baseEvent := *(*ditypes.BaseEvent)(unsafe.Pointer(&record[0])) + event.ProbeID = unix.ByteSliceToString(baseEvent.Probe_id[:]) + + allowed, _, _ := ratelimiters.AllowOneEvent(event.ProbeID) + if !allowed { + // log.Infof("event dropped by rate limit. Probe %s\t(%d dropped events out of %d)\n", + // event.ProbeID, droppedEvents, droppedEvents+successfulEvents) + return nil + } + + event.PID = baseEvent.Pid + event.UID = baseEvent.Uid + event.StackPCs = baseEvent.Program_counters[:] + event.Argdata = readParams(record[ditypes.SizeofBaseEvent:]) + return &event +} + +// ParseParams extracts just the parsed parameters from the full event record +func ParseParams(record []byte) ([]*ditypes.Param, error) { + if len(record) < 392 { + return nil, fmt.Errorf("malformed event record (length %d)", len(record)) + } + return readParams(record[392:]), nil +} + +func readParams(values []byte) []*ditypes.Param { + outputParams := []*ditypes.Param{} + for i := 0; i < MaxBufferSize; { + if i+3 >= len(values) { + break + } + paramTypeDefinition := parseTypeDefinition(values[i:]) + if paramTypeDefinition == nil { + break + } + + sizeOfTypeDefinition := countBufferUsedByTypeDefinition(paramTypeDefinition) + i += sizeOfTypeDefinition + val, numBytesRead := parseParamValue(paramTypeDefinition, values[i:]) + if reflect.Kind(val.Kind) == reflect.Slice { + // In BPF we read the slice by reading the maximum size of a slice + // that we allow, instead of just the size of the slice (which we + // know at runtime). This is to satisfy the verifier. When parsing + // here, we read just the actual slice content, but have to move the + // buffer index ahead by the amount of space used by the max read. 
+ i += ditypes.SliceMaxSize + } else { + i += numBytesRead + } + outputParams = append(outputParams, val) + } + return outputParams +} + +// parseParamValue takes the representation of the param type's definition and the +// actual values in the buffer and populates the definition with the value parsed +// from the byte buffer. It returns the resulting parameter and an indication of +// how many bytes were read from the buffer +func parseParamValue(definition *ditypes.Param, buffer []byte) (*ditypes.Param, int) { + // Start by creating a stack with each layer of the definition + // which will correspond with the layers of the values read from buffer. + // This is done using a temporary stack. + tempStack := newParamStack() + definitionStack := newParamStack() + tempStack.push(definition) + for !tempStack.isEmpty() { + current := tempStack.pop() + definitionStack.push(copyParam(current)) + for i := 0; i < len(current.Fields); i++ { + tempStack.push(current.Fields[i]) + } + } + var i int + valueStack := newParamStack() + for i = 0; i+3 < len(buffer); { + paramDefinition := definitionStack.pop() + if paramDefinition == nil { + break + } + if !isTypeWithHeader(paramDefinition.Kind) { + // This is a regular value (no sub-fields). + // We parse the value of it from the buffer and push it to the value stack + paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) + i += int(paramDefinition.Size) + valueStack.push(paramDefinition) + } else if reflect.Kind(paramDefinition.Kind) == reflect.Pointer { + // Pointers are unique in that they have their own value, and sub-fields. + // We parse the value of it from the buffer, place it in the value for + // the pointer itself, then pop the next value and place it as a sub-field. + paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) + i += int(paramDefinition.Size) + paramDefinition.Fields = append(paramDefinition.Fields, valueStack.pop()) + valueStack.push(paramDefinition) + } else { + // This is a type with sub-fields which have already been parsed and push + // onto the value stack. We pop those and set them as fields in this type. + // We then push this type onto the value stack as it may also be a sub-field. + // In header types like this, paramDefinition.Size corresponds with the number of + // fields under it. + for n := 0; n < int(paramDefinition.Size); n++ { + paramDefinition.Fields = append([]*ditypes.Param{valueStack.pop()}, paramDefinition.Fields...) + } + valueStack.push(paramDefinition) + } + } + return valueStack.pop(), i +} + +func copyParam(p *ditypes.Param) *ditypes.Param { + return &ditypes.Param{ + Type: p.Type, + Kind: p.Kind, + Size: p.Size, + } +} + +func parseKindToString(kind byte) string { + if kind == 255 { + return "Unsupported" + } else if kind == 254 { + return "reached field limit" + } + + return reflect.Kind(kind).String() +} + +// parseTypeDefinition is given a buffer which contains the header type definition +// for basic/complex types, and the actual content of those types. 
+// It returns a fully populated tree of `ditypes.Param` which will be used for parsing +// the actual values +func parseTypeDefinition(b []byte) *ditypes.Param { + stack := newParamStack() + i := 0 + for { + if len(b) < 3 { + return nil + } + newParam := &ditypes.Param{ + Kind: b[i], + Size: binary.LittleEndian.Uint16(b[i+1 : i+3]), + Type: parseKindToString(b[i]), + } + if newParam.Kind == 0 && newParam.Size == 0 { + break + } + i += 3 + if isTypeWithHeader(newParam.Kind) { + stack.push(newParam) + continue + } + + stackCheck: + if stack.isEmpty() { + return newParam + } + top := stack.peek() + top.Fields = append(top.Fields, newParam) + if len(top.Fields) == int(top.Size) || + (reflect.Kind(top.Kind) == reflect.Pointer && len(top.Fields) == 1) { + newParam = stack.pop() + goto stackCheck + } + + } + return nil +} + +// countBufferUsedByTypeDefinition is used to determine that amount of bytes +// that were used to read the type definition. Each individual element of the +// definition uses 3 bytes (1 for kind, 2 for size). This is a needed calculation +// so we know where we should read the actual values in the buffer. +func countBufferUsedByTypeDefinition(root *ditypes.Param) int { + queue := []*ditypes.Param{root} + counter := 0 + for len(queue) != 0 { + front := queue[0] + queue = queue[1:] + counter += 3 + queue = append(queue, front.Fields...) + } + return counter +} + +func isTypeWithHeader(pieceType byte) bool { + return reflect.Kind(pieceType) == reflect.Struct || + reflect.Kind(pieceType) == reflect.Slice || + reflect.Kind(pieceType) == reflect.Array || + reflect.Kind(pieceType) == reflect.Pointer +} + +func parseIndividualValue(paramType byte, paramValueBytes []byte) string { + switch reflect.Kind(paramType) { + case reflect.Uint8: + return fmt.Sprintf("%d", uint8(paramValueBytes[0])) + case reflect.Int8: + return fmt.Sprintf("%d", int8(paramValueBytes[0])) + case reflect.Uint16: + return fmt.Sprintf("%d", byteOrder.Uint16(paramValueBytes)) + case reflect.Int16: + return fmt.Sprintf("%d", int16(byteOrder.Uint16(paramValueBytes))) + case reflect.Uint32: + return fmt.Sprintf("%d", byteOrder.Uint32(paramValueBytes)) + case reflect.Int32: + return fmt.Sprintf("%d", int32(byteOrder.Uint32(paramValueBytes))) + case reflect.Uint64: + return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes)) + case reflect.Int64: + return fmt.Sprintf("%d", int64(byteOrder.Uint64(paramValueBytes))) + case reflect.Uint: + return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes)) + case reflect.Int: + return fmt.Sprintf("%d", int(byteOrder.Uint64(paramValueBytes))) + case reflect.Pointer: + return fmt.Sprintf("0x%X", byteOrder.Uint64(paramValueBytes)) + case reflect.String: + return string(paramValueBytes) + case reflect.Bool: + if paramValueBytes[0] == 1 { + return "true" + } else { + return "false" + } + case ditypes.KindUnsupported: + return "UNSUPPORTED" + default: + return "" + } +} diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser_test.go b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go new file mode 100644 index 0000000000000..94496b5cd2d0f --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go @@ -0,0 +1,298 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
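[Illustrative aside, not part of the diff: a minimal, self-contained Go sketch of the 3-byte header convention that parseTypeDefinition and countBufferUsedByTypeDefinition above rely on, where each type-definition node is one kind byte followed by a little-endian uint16 size and a zero kind/size pair terminates the region. The sample buffer and program are invented; only the encoding convention is taken from the code above.]

package main

import (
	"encoding/binary"
	"fmt"
	"reflect"
)

func main() {
	// A slice header claiming 2 elements, followed by two uint8 element headers,
	// then a zero kind/size pair which terminates the definition region.
	def := []byte{
		byte(reflect.Slice), 2, 0,
		byte(reflect.Uint8), 1, 0,
		byte(reflect.Uint8), 1, 0,
		0, 0, 0,
	}

	nodes := 0
	for i := 0; i+3 <= len(def); i += 3 {
		kind := def[i]
		size := binary.LittleEndian.Uint16(def[i+1 : i+3])
		if kind == 0 && size == 0 {
			break
		}
		fmt.Printf("node %d: kind=%s size=%d\n", nodes, reflect.Kind(kind), size)
		nodes++
	}
	// Mirrors countBufferUsedByTypeDefinition: the definition region occupies 3 bytes per node.
	fmt.Printf("definition bytes used: %d\n", nodes*3)
}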
+ +//go:build linux_bpf + +package eventparser + +import ( + "fmt" + "reflect" + "testing" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +func TestCountBufferUsedByTypeDefinition(t *testing.T) { + tests := []struct { + name string + param *ditypes.Param + expected int + }{ + { + name: "Struct with nested structs and ints", + param: &ditypes.Param{ + Kind: byte(reflect.Struct), + Size: 2, + Fields: []*ditypes.Param{ + {Kind: byte(reflect.Struct), Size: 2, Fields: []*ditypes.Param{ + {Kind: byte(reflect.Int), Size: 8}, + {Kind: byte(reflect.Int), Size: 8}, + }}, + {Kind: byte(reflect.Int), Size: 8}, + }, + }, + expected: 15, + }, + { + name: "Complex nested structure", + param: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + }}, + }, + }, + expected: 18, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := countBufferUsedByTypeDefinition(tt.param) + if result != tt.expected { + t.Errorf("Expected %d, got %d", tt.expected, result) + } + }) + } +} + +func TestParseParamValue(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + inputDefinition *ditypes.Param + expectedValue *ditypes.Param + }{ + { + name: "Basic slice of structs", + inputBuffer: []byte{ + 1, 2, 0, 3, 0, 0, 0, // Content of slice element 1 + 4, 5, 0, 6, 0, 0, 0, // Content of slice element 2 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Extra padding + }, + inputDefinition: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + }, + }, + expectedValue: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "3", Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "6", Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + val, _ := parseParamValue(tt.inputDefinition, tt.inputBuffer) + if !reflect.DeepEqual(val, tt.expectedValue) { + t.Errorf("Parsed incorrectly! 
Got %+v, expected %+v", val, tt.expectedValue) + } + }) + } +} + +func TestReadParams(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + expectedResult []*ditypes.Param + }{ + { + name: "Basic slice of structs", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + 25, 3, 0, // Slice elements are each a struct with 3 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 3, 0, // Slice elements are each a struct with 3 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 1, 2, 0, 3, // Content of slice element 1 (not relevant for this function) + 4, 5, 0, 6, // Content of slice element 2 (not relevant for this function) + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + }, + expectedResult: []*ditypes.Param{{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "3", Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "6", Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + }, + }}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := readParams(tt.inputBuffer) + if !reflect.DeepEqual(output, tt.expectedResult) { + fmt.Printf("Got: %v\n", output) + fmt.Printf("Expected: %v\n", tt.expectedResult) + t.Errorf("Didn't read correctly!") + } + }) + } +} +func TestParseTypeDefinition(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + expectedResult *ditypes.Param + }{ + { + name: "Slice of structs with uint8 and uint16 fields", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + + 25, 3, 0, // Slice elements are each a struct with 3 fields + + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + + 25, 3, 0, // Slice elements are each a struct with 3 fields + + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, + expectedResult: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + { + Type: "struct", Size: 0x3, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + { + Type: "struct", Size: 0x3, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + }, + { + name: "Nested struct fields", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + 25, 4, 0, // Slice elements are each a struct with 2 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 8, 1, 0, // Struct field 2 is a uint8 (size 
1) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 2, 0, // Struct field 4 is a struct with 2 fields + 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) + 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) + 25, 4, 0, // Slice elements are each a struct with 2 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 8, 1, 0, // Struct field 2 is a uint8 (size 1) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 2, 0, // Struct field 4 is a struct with 2 fields + 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) + 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) + 1, 2, 3, // Content of slice element 1 (top-level uint8, then 2 second tier uint8s) + 4, 5, 6, // Content of slice element 2 (top-level uint8, then 2 second tier uint8s) + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, + expectedResult: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + { + Type: "struct", Size: 0x4, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + { + Type: "struct", Size: 0x2, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + { + Type: "struct", Size: 0x4, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + { + Type: "struct", Size: 0x2, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + typeDefinition := parseTypeDefinition(tt.inputBuffer) + if !reflect.DeepEqual(typeDefinition, tt.expectedResult) { + fmt.Printf("%v\n", typeDefinition) + fmt.Printf("%v\n", tt.expectedResult) + t.Errorf("Not equal!") + } + }) + } +} diff --git a/pkg/dynamicinstrumentation/eventparser/param_stack.go b/pkg/dynamicinstrumentation/eventparser/param_stack.go new file mode 100644 index 0000000000000..b2359951ca25a --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/param_stack.go @@ -0,0 +1,45 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package eventparser + +import ( + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +type paramStack struct { + arr []*ditypes.Param +} + +func newParamStack() *paramStack { + s := paramStack{arr: []*ditypes.Param{}} + return &s +} + +func (s *paramStack) isEmpty() bool { + return len(s.arr) == 0 +} + +func (s *paramStack) pop() *ditypes.Param { + if s.isEmpty() { + return nil + } + top := s.peek() + s.arr = s.arr[0 : len(s.arr)-1] + return top +} + +func (s *paramStack) peek() *ditypes.Param { + if s.isEmpty() { + return nil + } + return s.arr[len(s.arr)-1] +} + +func (s *paramStack) push(p *ditypes.Param) { + s.arr = append(s.arr, p) +} diff --git a/pkg/dynamicinstrumentation/config.go b/pkg/dynamicinstrumentation/module/config.go similarity index 96% rename from pkg/dynamicinstrumentation/config.go rename to pkg/dynamicinstrumentation/module/config.go index 8265cf8d5a3f1..fa8c7530d2242 100644 --- a/pkg/dynamicinstrumentation/config.go +++ b/pkg/dynamicinstrumentation/module/config.go @@ -3,7 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package dynamicinstrumentation +//go:build linux_bpf + +package module import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" diff --git a/pkg/dynamicinstrumentation/doc.go b/pkg/dynamicinstrumentation/module/doc.go similarity index 51% rename from pkg/dynamicinstrumentation/doc.go rename to pkg/dynamicinstrumentation/module/doc.go index 026de960a20b0..145cc294d401c 100644 --- a/pkg/dynamicinstrumentation/doc.go +++ b/pkg/dynamicinstrumentation/module/doc.go @@ -3,6 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package dynamicinstrumentation encapsulates a system-probe module which uses uprobes and bpf -// to exfiltrate data from running processes -package dynamicinstrumentation +//go:build linux_bpf + +// Package module encapsulates a system-probe module which uses uprobes and bpf +// to exfiltrate data from running processes. This is the Go implementation of +// the dynamic instrumentation product. +package module diff --git a/pkg/dynamicinstrumentation/module/module.go b/pkg/dynamicinstrumentation/module/module.go new file mode 100644 index 0000000000000..c5cbfced2b919 --- /dev/null +++ b/pkg/dynamicinstrumentation/module/module.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+
+//go:build linux_bpf
+
+package module
+
+import (
+	"net/http"
+
+	"github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
+	"github.com/DataDog/datadog-agent/cmd/system-probe/utils"
+	coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup"
+	"github.com/DataDog/datadog-agent/pkg/util/log"
+
+	di "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation"
+)
+
+// Module is the dynamic instrumentation system probe module
+type Module struct {
+	godi *di.GoDI
+}
+
+// NewModule creates a new dynamic instrumentation system probe module
+func NewModule(config *Config) (*Module, error) {
+	godi, err := di.RunDynamicInstrumentation(&di.DIOptions{
+		Offline:          coreconfig.SystemProbe().GetBool("dynamic_instrumentation.offline_mode"),
+		ProbesFilePath:   coreconfig.SystemProbe().GetString("dynamic_instrumentation.probes_file_path"),
+		SnapshotOutput:   coreconfig.SystemProbe().GetString("dynamic_instrumentation.snapshot_output_file_path"),
+		DiagnosticOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.diagnostics_output_file_path"),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &Module{godi}, nil
+}
+
+// Close disables the dynamic instrumentation system probe module
+func (m *Module) Close() {
+	if m.godi == nil {
+		log.Info("Could not close dynamic instrumentation module, already closed")
+		return
+	}
+	log.Info("Closing dynamic instrumentation module")
+	m.godi.Close()
+}
+
+// GetStats returns a map of various metrics about the state of the module
+func (m *Module) GetStats() map[string]interface{} {
+	if m == nil || m.godi == nil {
+		log.Info("Could not get stats from dynamic instrumentation module, closed")
+		return map[string]interface{}{}
+	}
+	debug := map[string]interface{}{}
+	stats := m.godi.GetStats()
+	debug["PIDEventsCreated"] = stats.PIDEventsCreatedCount
+	debug["ProbeEventsCreated"] = stats.ProbeEventsCreatedCount
+	return debug
+}
+
+// Register creates a health check endpoint for the dynamic instrumentation module
+func (m *Module) Register(httpMux *module.Router) error {
+	httpMux.HandleFunc("/check", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests,
+		func(w http.ResponseWriter, req *http.Request) {
+			stats := []string{}
+			utils.WriteAsJSON(w, stats)
+		}))
+
+	log.Info("Registering dynamic instrumentation module")
+	return nil
+}
diff --git a/pkg/dynamicinstrumentation/module_linux.go b/pkg/dynamicinstrumentation/module/module_stub.go
similarity index 55%
rename from pkg/dynamicinstrumentation/module_linux.go
rename to pkg/dynamicinstrumentation/module/module_stub.go
index 193e8a90646ac..83956088c9466
--- a/pkg/dynamicinstrumentation/module_linux.go
+++ b/pkg/dynamicinstrumentation/module/module_stub.go
@@ -3,34 +3,44 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-package dynamicinstrumentation
+//go:build !linux_bpf
+
+// Package module provides the dynamic instrumentation module. This stub is compiled
+// when the target platform does not support the features required by dynamic
+// instrumentation.
+package module import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" - "github.com/DataDog/datadog-agent/pkg/util/log" + sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" ) //nolint:revive // TODO(DEBUG) Fix revive linter -type Module struct{} +type Config struct{} //nolint:revive // TODO(DEBUG) Fix revive linter -func NewModule(config *Config) (*Module, error) { - return &Module{}, nil +func NewConfig(_ *sysconfigtypes.Config) (*Config, error) { + return &Config{}, nil } //nolint:revive // TODO(DEBUG) Fix revive linter -func (m *Module) Close() { - log.Info("Closing user tracer module") +type Module struct { } +//nolint:revive // TODO(DEBUG) Fix revive linter +func NewModule(config *Config) (*Module, error) { + return nil, nil +} + +//nolint:revive // TODO(DEBUG) Fix revive linter +func (m *Module) Close() {} + //nolint:revive // TODO(DEBUG) Fix revive linter func (m *Module) GetStats() map[string]interface{} { - debug := map[string]interface{}{} - return debug + return nil } //nolint:revive // TODO(DEBUG) Fix revive linter func (m *Module) Register(_ *module.Router) error { - log.Info("Registering dynamic instrumentation module") return nil } diff --git a/pkg/dynamicinstrumentation/proctracker/proctracker.go b/pkg/dynamicinstrumentation/proctracker/proctracker.go new file mode 100644 index 0000000000000..f03d86c17efaa --- /dev/null +++ b/pkg/dynamicinstrumentation/proctracker/proctracker.go @@ -0,0 +1,251 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package proctracker provides a facility for Dynamic Instrumentation to discover +// and track the lifecycle of processes running on the same host +package proctracker + +import ( + "debug/elf" + "errors" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/go/binversion" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "golang.org/x/sys/unix" +) + +type processTrackerCallback func(ditypes.DIProcs) + +// ProcessTracker is adapted from https://github.com/DataDog/datadog-agent/blob/main/pkg/network/protocols/http/ebpf_gotls.go +type ProcessTracker struct { + procRoot string + lock sync.RWMutex + pm *monitor.ProcessMonitor + processes processes + binaries binaries + callback processTrackerCallback + unsubscribe []func() +} + +// NewProcessTracker creates a new ProcessTracer +func NewProcessTracker(callback processTrackerCallback) *ProcessTracker { + pt := ProcessTracker{ + pm: monitor.GetProcessMonitor(), + procRoot: kernel.ProcFSRoot(), + callback: callback, + binaries: make(map[binaryID]*runningBinary), + processes: make(map[pid]binaryID), + } + return &pt +} + +// Start subscribes to exec and exit events so dynamic instrumentation can be made +// aware of new processes that may need to be instrumented or instrumented processes +// that should no longer be instrumented +func 
(pt *ProcessTracker) Start() error { + + unsubscribeExec := pt.pm.SubscribeExec(pt.handleProcessStart) + unsubscribeExit := pt.pm.SubscribeExit(pt.handleProcessStop) + + pt.unsubscribe = append(pt.unsubscribe, unsubscribeExec) + pt.unsubscribe = append(pt.unsubscribe, unsubscribeExit) + + err := pt.pm.Initialize(false) + if err != nil { + return err + } + + return nil +} + +// Stop unsubscribes from exec and exit events +func (pt *ProcessTracker) Stop() { + for _, unsubscribe := range pt.unsubscribe { + unsubscribe() + } +} + +func (pt *ProcessTracker) handleProcessStart(pid uint32) { + exePath := filepath.Join(pt.procRoot, strconv.FormatUint(uint64(pid), 10), "exe") + + go pt.inspectBinary(exePath, pid) +} + +func (pt *ProcessTracker) handleProcessStop(pid uint32) { + pt.unregisterProcess(pid) +} + +func (pt *ProcessTracker) inspectBinary(exePath string, pid uint32) { + serviceName := getServiceName(pid) + if serviceName == "" { + // if the expected env vars are not set we don't inspect the binary + return + } + log.Info("Found instrumentation candidate", serviceName) + // binPath, err := os.Readlink(exePath) + // if err != nil { + // // /proc could be slow to update so we retry a few times + // end := time.Now().Add(10 * time.Millisecond) + // for end.After(time.Now()) { + // binPath, err = os.Readlink(exePath) + // if err == nil { + // break + // } + // time.Sleep(time.Millisecond) + // } + // } + // if err != nil { + // // we can't access the binary path here (pid probably ended already) + // // there is not much we can do, and we don't want to flood the logs + // log.Infof("cannot follow link %s -> %s, %s", exePath, binPath, err) + // // in docker, following the symlink does not work, but we can open the file in /proc + // // if we can't follow the symlink we try to open /proc directly + // // TODO: validate this approach + // binPath = exePath + // } + + // TODO: switch to using exePath for the demo, use conditional logic above moving forward + binPath := exePath + f, err := os.Open(exePath) + if err != nil { + // this should be a debug log, but we want to know if this happens + log.Infof("could not open file %s, %s", binPath, err) + return + } + defer f.Close() + + elfFile, err := elf.NewFile(f) + if err != nil { + log.Infof("file %s could not be parsed as an ELF file: %s", binPath, err) + return + } + + noFuncs := make(map[string]bininspect.FunctionConfiguration) + noStructs := make(map[bininspect.FieldIdentifier]bininspect.StructLookupFunction) + _, err = bininspect.InspectNewProcessBinary(elfFile, noFuncs, noStructs) + if errors.Is(err, binversion.ErrNotGoExe) { + return + } + if err != nil { + log.Infof("error reading exe: %s", err) + return + } + + var stat syscall.Stat_t + if err = syscall.Stat(binPath, &stat); err != nil { + log.Infof("could not stat binary path %s: %s", binPath, err) + return + } + binID := binaryID{ + Id_major: unix.Major(stat.Dev), + Id_minor: unix.Minor(stat.Dev), + Ino: stat.Ino, + } + pt.registerProcess(binID, pid, stat.Mtim, binPath, serviceName) +} + +func (pt *ProcessTracker) registerProcess(binID binaryID, pid pid, mTime syscall.Timespec, binaryPath string, serviceName string) { + pt.lock.Lock() + defer pt.lock.Unlock() + + pt.processes[pid] = binID + if bin, ok := pt.binaries[binID]; ok { + // process that uses this binary already exists + bin.processCount++ + } else { + + pt.binaries[binID] = &runningBinary{ + binID: binID, + mTime: mTime, + processCount: 1, + binaryPath: binaryPath, + serviceName: serviceName, + } + } + state := pt.currentState() 
+ pt.callback(state) +} + +func getServiceName(pid uint32) string { + envVars, _, err := utils.EnvVars([]string{"DD"}, pid, model.MaxArgsEnvsSize) + if err != nil { + return "" + } + + serviceName := "" + diEnabled := false + for _, envVar := range envVars { + parts := strings.SplitN(envVar, "=", 2) + if len(parts) == 2 && parts[0] == "DD_SERVICE" { + serviceName = parts[1] + } + if len(parts) == 2 && parts[0] == "DD_DYNAMIC_INSTRUMENTATION_ENABLED" { + diEnabled = parts[1] == "true" + } + } + + if !diEnabled { + return "" + } + return serviceName +} + +func (pt *ProcessTracker) unregisterProcess(pid pid) { + pt.lock.Lock() + defer pt.lock.Unlock() + + binID, ok := pt.processes[pid] + if !ok { + return + } + delete(pt.processes, pid) + + bin, ok := pt.binaries[binID] + if !ok { + return + } + bin.processCount-- + if bin.processCount == 0 { + delete(pt.binaries, binID) + state := pt.currentState() + pt.callback(state) + } +} + +func (pt *ProcessTracker) currentState() map[ditypes.PID]*ditypes.ProcessInfo { + state := make(map[ditypes.PID]*ditypes.ProcessInfo) + + for pid, binID := range pt.processes { + bin := pt.binaries[binID] + state[pid] = &ditypes.ProcessInfo{ + PID: pid, + BinaryPath: bin.binaryPath, + ServiceName: bin.serviceName, + + ProbesByID: make(map[ditypes.ProbeID]*ditypes.Probe), + InstrumentationUprobes: make(map[ditypes.ProbeID]*link.Link), + InstrumentationObjects: make(map[ditypes.ProbeID]*ebpf.Collection), + } + } + return state +} diff --git a/pkg/dynamicinstrumentation/proctracker/types.go b/pkg/dynamicinstrumentation/proctracker/types.go new file mode 100644 index 0000000000000..a377cbef780d8 --- /dev/null +++ b/pkg/dynamicinstrumentation/proctracker/types.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package proctracker + +import ( + "syscall" + + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/gotls" +) + +type pid = uint32 + +type binaryID = gotls.TlsBinaryId + +type runningBinary struct { + // Inode number of the binary + binID binaryID + + // Modification time of the hooked binary, at the time of hooking. + mTime syscall.Timespec + + // Reference counter for the number of currently running processes for + // this binary. + processCount int32 + + // The location of the binary on the filesystem, as a string. + binaryPath string + + // The value of DD_SERVICE for the given binary. + // Associating a service name with a binary is not correct because + // we may have the same binary running with different service names + // on the same machine. However, for simplicity in the prototype we + // assume a 1:1 mapping. + serviceName string +} + +type binaries map[binaryID]*runningBinary +type processes map[pid]binaryID diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go new file mode 100644 index 0000000000000..0283c526c5c05 --- /dev/null +++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go @@ -0,0 +1,92 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
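[Illustrative aside, not part of the diff: a standalone sketch of the gating logic used by getServiceName above. A process is only considered an instrumentation candidate when DD_DYNAMIC_INSTRUMENTATION_ENABLED=true is present, and DD_SERVICE supplies the service name. The real code reads the target process's environment via utils.EnvVars; here the environment list and sample values are invented and passed in memory.]

package main

import (
	"fmt"
	"strings"
)

// serviceNameFromEnv returns the DD_SERVICE value only when dynamic
// instrumentation is explicitly enabled for the process, otherwise "".
func serviceNameFromEnv(envVars []string) string {
	serviceName := ""
	diEnabled := false
	for _, envVar := range envVars {
		parts := strings.SplitN(envVar, "=", 2)
		if len(parts) != 2 {
			continue
		}
		switch parts[0] {
		case "DD_SERVICE":
			serviceName = parts[1]
		case "DD_DYNAMIC_INSTRUMENTATION_ENABLED":
			diEnabled = parts[1] == "true"
		}
	}
	if !diEnabled {
		return ""
	}
	return serviceName
}

func main() {
	fmt.Println(serviceNameFromEnv([]string{"DD_SERVICE=orders", "DD_DYNAMIC_INSTRUMENTATION_ENABLED=true"})) // "orders"
	fmt.Println(serviceNameFromEnv([]string{"DD_SERVICE=orders"}))                                            // "" (not enabled)
}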
+ +//go:build linux_bpf + +// Package ratelimiter implements a simple rate limiter used for tracking and limiting +// the rate of events being produced per probe +package ratelimiter + +import ( + "math" + + "golang.org/x/time/rate" +) + +// SingleRateLimiter is a wrapper on top of golang.org/x/time/rate which implements a rate limiter but also +// returns the effective rate of allowance. +type SingleRateLimiter struct { + rate float64 + limiter *rate.Limiter + droppedEvents int64 + successfulEvents int64 +} + +// MultiProbeRateLimiter is used for tracking and limiting the rate of events +// being produced for multiple probes +type MultiProbeRateLimiter struct { + defaultRate float64 + x map[string]*SingleRateLimiter +} + +// NewMultiProbeRateLimiter creates a new MultiProbeRateLimiter +func NewMultiProbeRateLimiter(defaultRatePerSecond float64) *MultiProbeRateLimiter { + return &MultiProbeRateLimiter{ + defaultRate: defaultRatePerSecond, + x: map[string]*SingleRateLimiter{}, + } +} + +// SetRate sets the rate for events with a specific ID. Specify mps=0 to +// disable rate limiting. +func (mr *MultiProbeRateLimiter) SetRate(id string, mps float64) { + mr.x[id] = NewSingleEventRateLimiter(mps) +} + +// AllowOneEvent is called to determine if an event should be allowed according to +// the configured rate limit. It returns a bool to say allowed or not, then the number +// of dropped events, and then the number of successful events +func (mr *MultiProbeRateLimiter) AllowOneEvent(id string) (bool, int64, int64) { + rateLimiter, ok := mr.x[id] + if !ok { + mr.SetRate(id, mr.defaultRate) + rateLimiter = mr.x[id] + } + return rateLimiter.AllowOneEvent(), + rateLimiter.droppedEvents, rateLimiter.successfulEvents +} + +// NewSingleEventRateLimiter returns a rate limiter which restricts the number of single events sampled per second. +// This defaults to infinite, allow all behaviour. The MaxPerSecond value of the rule may override the default. +func NewSingleEventRateLimiter(mps float64) *SingleRateLimiter { + limit := math.MaxFloat64 + if mps > 0 { + limit = mps + } + return &SingleRateLimiter{ + rate: mps, + limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))), + } +} + +// AllowOneEvent returns the rate limiter's decision to allow an event to be processed, and the +// effective rate at the time it is called. The effective rate is computed by averaging the rate +// for the previous second with the current rate +func (r *SingleRateLimiter) AllowOneEvent() bool { + + if r.rate == 0 { + return true + } + + var sampled = false + if r.limiter.Allow() { + sampled = true + r.successfulEvents++ + } else { + r.droppedEvents++ + } + + return sampled +} diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go new file mode 100644 index 0000000000000..88cc21aa199a0 --- /dev/null +++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go @@ -0,0 +1,48 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
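[Illustrative aside, not part of the diff: a hypothetical caller of the ratelimiter package added above, assuming the package is importable from this repository and the program is built with the linux_bpf tag. It shows that the default rate is applied the first time an unknown probe ID is seen and that SetRate with 0 disables limiting for that ID; the probe ID strings are invented.]

//go:build linux_bpf

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
)

func main() {
	rl := ratelimiter.NewMultiProbeRateLimiter(1.0) // default: roughly 1 event/second per probe
	rl.SetRate("config-probe", 0)                   // 0 means: always allow events for this ID

	for i := 0; i < 3; i++ {
		// Returns the decision plus the running dropped/successful counters for this probe.
		allowed, dropped, successful := rl.AllowOneEvent("some-probe-id")
		fmt.Printf("allowed=%t dropped=%d successful=%d\n", allowed, dropped, successful)
	}
}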
+ +//go:build linux_bpf + +package ratelimiter + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRateLimit(t *testing.T) { + + testCases := []struct { + name string + limitPerSecond float64 + }{ + { + name: "expected1", + limitPerSecond: 1.0, + }, + { + name: "expected2", + limitPerSecond: 5.0, + }, + } + + for _, testcase := range testCases { + + const timesToRun = 10000 + t.Run(testcase.name, func(t *testing.T) { + + r := NewSingleEventRateLimiter(testcase.limitPerSecond) + + for i := 0; i < timesToRun; i++ { + r.AllowOneEvent() + } + + assert.Equal(t, float64(timesToRun-float64(r.droppedEvents)), testcase.limitPerSecond) + assert.Equal(t, float64(r.droppedEvents), timesToRun-testcase.limitPerSecond) + assert.Equal(t, float64(r.successfulEvents), testcase.limitPerSecond) + }) + } +} diff --git a/pkg/dynamicinstrumentation/ringbufconsumer.go b/pkg/dynamicinstrumentation/ringbufconsumer.go new file mode 100644 index 0000000000000..ed6c574377ce5 --- /dev/null +++ b/pkg/dynamicinstrumentation/ringbufconsumer.go @@ -0,0 +1,64 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package dynamicinstrumentation + +import ( + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf/ringbuf" +) + +// startRingbufferConsumer opens the pinned bpf ringbuffer map +func (goDI *GoDI) startRingbufferConsumer() (func(), error) { + r, err := ringbuf.NewReader(ditypes.EventsRingbuffer) + if err != nil { + return nil, fmt.Errorf("couldn't set up reader for ringbuffer: %w", err) + } + + var ( + record ringbuf.Record + closed = false + ) + + closeFunc := func() { + closed = true + r.Close() + } + + // TODO: ensure rate limiters are removed once probes are removed + rateLimiters := ratelimiter.NewMultiProbeRateLimiter(1.0) + rateLimiters.SetRate(ditypes.ConfigBPFProbeID, 0) + + go func() { + for { + if closed { + break + } + err = r.ReadInto(&record) + if err != nil { + log.Infof("couldn't read event off ringbuffer: %s", err.Error()) + continue + } + + event := eventparser.ParseEvent(record.RawSample, rateLimiters) + if event == nil { + continue + } + goDI.stats.PIDEventsCreatedCount[event.PID]++ + goDI.stats.ProbeEventsCreatedCount[event.ProbeID]++ + goDI.processEvent(event) + } + }() + + return closeFunc, nil +} diff --git a/pkg/dynamicinstrumentation/uploader/di_log_converter.go b/pkg/dynamicinstrumentation/uploader/di_log_converter.go new file mode 100644 index 0000000000000..8f6d3063dc7bf --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/di_log_converter.go @@ -0,0 +1,159 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package uploader + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + + "github.com/google/uuid" +) + +// NewDILog creates a new snapshot upload based on the event and relevant process +func NewDILog(procInfo *ditypes.ProcessInfo, event *ditypes.DIEvent) *ditypes.SnapshotUpload { + if procInfo == nil { + log.Infof("Process with pid %d not found, ignoring event", event.PID) + return nil + } + probe := procInfo.GetProbe(event.ProbeID) + if probe == nil { + log.Info("Probe ID not found, ignoring event", event.ProbeID) + return nil + } + + snapshotID, _ := uuid.NewUUID() + argDefs := getFunctionArguments(procInfo, probe) + var captures ditypes.Captures + if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters { + captures = convertCaptures(argDefs, event.Argdata) + } else { + captures = reportCaptureError(argDefs) + } + + capturesJSON, _ := json.Marshal(captures) + stackTrace, err := parseStackTrace(procInfo, event.StackPCs) + if err != nil { + log.Infof("event from pid/probe %d/%s does not include stack trace: %s\n", event.PID, event.ProbeID, err) + } + return &ditypes.SnapshotUpload{ + Service: probe.ServiceName, + Message: fmt.Sprintf("%s %s", probe.FuncName, capturesJSON), + DDSource: "dd_debugger", + DDTags: "", + Debugger: struct { + ditypes.Snapshot `json:"snapshot"` + }{ + Snapshot: ditypes.Snapshot{ + ID: &snapshotID, + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + Language: "go", + ProbeInSnapshot: convertProbe(probe), + Captures: captures, + Stack: stackTrace, + }, + }, + Duration: 0, + } +} + +func convertProbe(probe *ditypes.Probe) ditypes.ProbeInSnapshot { + module, function := parseFuncName(probe.FuncName) + return ditypes.ProbeInSnapshot{ + ID: getProbeUUID(probe.ID), + ProbeLocation: ditypes.ProbeLocation{ + Method: function, + Type: module, + }, + } +} + +func convertCaptures(defs []ditypes.Parameter, captures []*ditypes.Param) ditypes.Captures { + return ditypes.Captures{ + Entry: &ditypes.Capture{ + Arguments: convertArgs(defs, captures), + }, + } +} + +func reportCaptureError(defs []ditypes.Parameter) ditypes.Captures { + args := make(map[string]*ditypes.CapturedValue) + for _, def := range defs { + args[def.Name] = &ditypes.CapturedValue{ + Type: def.Type, + NotCapturedReason: "Failed to instrument, type is unsupported or too complex", + } + } + return ditypes.Captures{ + Entry: &ditypes.Capture{ + Arguments: args, + }, + } +} + +func convertArgs(defs []ditypes.Parameter, captures []*ditypes.Param) map[string]*ditypes.CapturedValue { + args := make(map[string]*ditypes.CapturedValue) + for idx, capture := range captures { + var argName string + if idx < len(defs) { + argName = defs[idx].Name + } else { + argName = fmt.Sprintf("arg_%d", idx) + } + + if capture == nil { + continue + } + + cv := &ditypes.CapturedValue{Type: capture.Type} + if capture.ValueStr != "" || capture.Type == "string" { + // we make a copy of the string so the pointer isn't overwritten in the loop + valueCopy := capture.ValueStr + cv.Value = &valueCopy + } + if capture.Fields != nil && idx < len(defs) { + cv.Fields = convertArgs(defs[idx].ParameterPieces, capture.Fields) + } + args[argName] = cv + } + return args +} + +func parseFuncName(funcName string) (string, string) { + parts := strings.Split(funcName, ".") + if len(parts) == 2 { + return parts[0], parts[1] + } + return "", funcName +} + +func 
getFunctionArguments(proc *ditypes.ProcessInfo, probe *ditypes.Probe) []ditypes.Parameter { + return proc.TypeMap.Functions[probe.FuncName] +} + +func getProbeUUID(probeID string) string { + // the RC config ID format is datadog///_/ + // if we fail to parse it, we just return the original probeID string + parts := strings.Split(probeID, "/") + if len(parts) != 5 { + return probeID + } + idPart := parts[len(parts)-2] + parts = strings.Split(idPart, "_") + if len(parts) != 2 { + return probeID + } + // we could also validate that the extracted string is a valid UUID, + // but it's not necessary since we tolerate IDs that don't parse + return parts[1] +} diff --git a/pkg/dynamicinstrumentation/uploader/offline.go b/pkg/dynamicinstrumentation/uploader/offline.go new file mode 100644 index 0000000000000..a1d19375ebf47 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/offline.go @@ -0,0 +1,83 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package uploader + +import ( + "encoding/json" + "os" + "sync" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// OfflineSerializer is used for serializing events and printing instead of +// uploading to the DataDog backend +type OfflineSerializer[T any] struct { + outputFile *os.File + mu sync.Mutex +} + +// NewOfflineLogSerializer creates an offline serializer for serializing events and printing instead of +// uploading to the DataDog backend +func NewOfflineLogSerializer(outputPath string) (*OfflineSerializer[ditypes.SnapshotUpload], error) { + if outputPath == "" { + panic("No snapshot output path set") + } + return NewOfflineSerializer[ditypes.SnapshotUpload](outputPath) +} + +// NewOfflineDiagnosticSerializer creates an offline serializer for serializing diagnostic information +// and printing instead of uploading to the DataDog backend +func NewOfflineDiagnosticSerializer(dm *diagnostics.DiagnosticManager, outputPath string) (*OfflineSerializer[ditypes.DiagnosticUpload], error) { + if outputPath == "" { + panic("No diagnostic output path set") + } + ds, err := NewOfflineSerializer[ditypes.DiagnosticUpload](outputPath) + if err != nil { + return nil, err + } + go func() { + for diagnostic := range dm.Updates { + ds.Enqueue(diagnostic) + } + }() + return ds, nil +} + +// NewOfflineSerializer is the generic create method for offline serialization +// of events or diagnostic output +func NewOfflineSerializer[T any](outputPath string) (*OfflineSerializer[T], error) { + file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + u := &OfflineSerializer[T]{ + outputFile: file, + } + return u, nil +} + +// Enqueue writes data to the offline serializer +func (s *OfflineSerializer[T]) Enqueue(item *T) bool { + s.mu.Lock() + defer s.mu.Unlock() + bs, err := json.Marshal(item) + if err != nil { + log.Info("Failed to marshal item", item) + return false + } + + _, err = s.outputFile.WriteString(string(bs) + "\n") + if err != nil { + log.Error(err) + } + return true +} diff --git a/pkg/dynamicinstrumentation/uploader/stack_trace.go b/pkg/dynamicinstrumentation/uploader/stack_trace.go new file mode 100644 index 
0000000000000..f428e2c40e0d2 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/stack_trace.go @@ -0,0 +1,151 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package uploader + +import ( + "cmp" + "debug/dwarf" + "errors" + "fmt" + "slices" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// parseStackTrace parses a raw byte array into 10 uint64 program counters +// which then get resolved into strings representing lines of a stack trace +func parseStackTrace(procInfo *ditypes.ProcessInfo, rawProgramCounters []uint64) ([]ditypes.StackFrame, error) { + stackTrace := make([]ditypes.StackFrame, 0) + if procInfo == nil { + return stackTrace, errors.New("nil process info") + } + + for i := range rawProgramCounters { + if rawProgramCounters[i] == 0 { + break + } + + entries, ok := procInfo.TypeMap.InlinedFunctions[rawProgramCounters[i]] + if ok { + for n := range entries { + inlinedFuncInfo, err := pcToLine(procInfo, rawProgramCounters[i]) + if err != nil { + return stackTrace, fmt.Errorf("could not resolve pc to inlined function info: %w", err) + } + + symName, lineNumber, err := parseInlinedEntry(procInfo.DwarfData.Reader(), entries[n]) + if err != nil { + return stackTrace, fmt.Errorf("could not get inlined entries: %w", err) + } + stackFrame := ditypes.StackFrame{Function: fmt.Sprintf("%s [inlined in %s]", symName, inlinedFuncInfo.fn), FileName: inlinedFuncInfo.file, Line: int(lineNumber)} + stackTrace = append(stackTrace, stackFrame) + } + } + + funcInfo, err := pcToLine(procInfo, rawProgramCounters[i]) + if err != nil { + return stackTrace, fmt.Errorf("could not resolve pc to function info: %w", err) + } + stackFrame := ditypes.StackFrame{Function: funcInfo.fn, FileName: funcInfo.file, Line: int(funcInfo.line)} + stackTrace = append(stackTrace, stackFrame) + + if funcInfo.fn == "main.main" { + break + } + } + return stackTrace, nil +} + +type funcInfo struct { + file string + line int64 + fn string +} + +func pcToLine(procInfo *ditypes.ProcessInfo, pc uint64) (*funcInfo, error) { + + var ( + file string + line int64 + fn string + ) + + typeMap := procInfo.TypeMap + + functionIndex, _ := slices.BinarySearchFunc(typeMap.FunctionsByPC, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + var fileNumber int64 + + if functionIndex >= len(typeMap.FunctionsByPC) { + return nil, fmt.Errorf("invalid function index") + } + funcEntry := typeMap.FunctionsByPC[functionIndex].Entry + for _, field := range funcEntry.Field { + if field.Attr == dwarf.AttrName { + fn = field.Val.(string) + } + if field.Attr == dwarf.AttrDeclFile { + fileNumber = field.Val.(int64) + } + if field.Attr == dwarf.AttrDeclLine { + line = field.Val.(int64) + } + } + + compileUnitIndex, _ := slices.BinarySearchFunc(typeMap.DeclaredFiles, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + compileUnitEntry := typeMap.DeclaredFiles[compileUnitIndex].Entry + + cuLineReader, err := procInfo.DwarfData.LineReader(compileUnitEntry) + if err != nil { + return nil, fmt.Errorf("could not get file line reader for compile unit: %w", err) + } + files := cuLineReader.Files() + if len(files) < int(fileNumber) { + return nil, fmt.Errorf("invalid file number 
in dwarf function entry associated with compile unit") + } + + file = files[fileNumber].Name + + return &funcInfo{ + file: file, + line: line, + fn: fn, + }, nil +} + +func parseInlinedEntry(reader *dwarf.Reader, e *dwarf.Entry) (name string, line int64, err error) { + + var offset dwarf.Offset + + for i := range e.Field { + if e.Field[i].Attr == dwarf.AttrAbstractOrigin { + offset = e.Field[i].Val.(dwarf.Offset) + reader.Seek(offset) + entry, err := reader.Next() + if err != nil { + return "", -1, fmt.Errorf("could not read inlined function origin: %w", err) + } + for j := range entry.Field { + if entry.Field[j].Attr == dwarf.AttrName { + name = entry.Field[j].Val.(string) + } + } + } + + if e.Field[i].Attr == dwarf.AttrCallLine { + line = e.Field[i].Val.(int64) + } + } + + return name, line, nil +} diff --git a/pkg/dynamicinstrumentation/uploader/uploader.go b/pkg/dynamicinstrumentation/uploader/uploader.go new file mode 100644 index 0000000000000..f14fa8233e0a4 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/uploader.go @@ -0,0 +1,221 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package uploader provides functionality for uploading events and diagnostic +// information to the DataDog backend +package uploader + +import ( + "bytes" + "encoding/json" + "fmt" + "mime/multipart" + "net/http" + "net/textproto" + "os" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// LogUploader is the interface for uploading Dynamic Instrumentation logs +type LogUploader interface { + Enqueue(item *ditypes.SnapshotUpload) bool +} + +// DiagnosticUploader is the interface for uploading Dynamic Instrumentation +// diagnostic information +type DiagnosticUploader interface { + Enqueue(item *ditypes.DiagnosticUpload) bool +} + +// Uploader is a generic form of uploader functionality +type Uploader[T any] struct { + buffer chan *T + client *http.Client + + batchSize int + uploadMode UploadMode +} + +// UploadMode reflects the kind of data that is being uploaded +type UploadMode bool + +const ( + // UploadModeDiagnostic means the data being uploaded is diagnostic information + UploadModeDiagnostic UploadMode = true + //UploadModeLog means the data being uploaded is logs + UploadModeLog UploadMode = false +) + +func startDiagnosticUploader(dm *diagnostics.DiagnosticManager) *Uploader[ditypes.DiagnosticUpload] { + u := NewUploader[ditypes.DiagnosticUpload](UploadModeDiagnostic) + go func() { + for diagnostic := range dm.Updates { + u.Enqueue(diagnostic) + } + }() + return u +} + +// NewLogUploader creates a new log uploader +func NewLogUploader() *Uploader[ditypes.SnapshotUpload] { + return NewUploader[ditypes.SnapshotUpload](UploadModeLog) +} + +// NewDiagnosticUploader creates a new diagnostic uploader +func NewDiagnosticUploader() *Uploader[ditypes.DiagnosticUpload] { + return startDiagnosticUploader(diagnostics.Diagnostics) +} + +// NewUploader creates a new uploader of a specified generic type +func NewUploader[T any](mode UploadMode) *Uploader[T] { + u := &Uploader[T]{ + buffer: make(chan *T, 100), + client: &http.Client{}, + + batchSize: 100, + uploadMode: mode, + } + go u.processBuffer() + return u +} 
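
A hedged usage sketch, not part of the patch itself: how the snapshot converter (NewDILog, added above in di_log_converter.go) and the generic uploader constructed here are expected to compose. It is assumed to live in the uploader package so both constructors are in scope, and the procInfo/event values are assumed to come from the process tracking and ringbuffer consumer added earlier in this patch series.

func exampleUploadOneEvent(procInfo *ditypes.ProcessInfo, event *ditypes.DIEvent) {
	logUploader := NewLogUploader() // Uploader[ditypes.SnapshotUpload] in log mode

	// Convert the parsed eBPF event into a snapshot payload; nil means the
	// process or probe was unknown and the converter already logged why.
	snapshot := NewDILog(procInfo, event)
	if snapshot == nil {
		return
	}

	// Enqueue is non-blocking: a full buffer (capacity 100) drops the item,
	// trading completeness for never stalling the event-processing goroutine.
	logUploader.Enqueue(snapshot)
}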
+ +// Enqueue enqueues data to be uploaded. It's return value reflects whether +// or not the upload queue was full +func (u *Uploader[T]) Enqueue(item *T) bool { + select { + case u.buffer <- item: + return true + default: + log.Infof("Uploader buffer full, dropping message %+v", item) + return false + } +} + +func (u *Uploader[T]) processBuffer() { + flushTimer := time.NewTicker(1 * time.Second) + defer flushTimer.Stop() + + batch := make([]*T, 0, 5) + + for { + select { + case item := <-u.buffer: + batch = append(batch, item) + if len(batch) >= u.batchSize { + batchCopy := make([]*T, len(batch)) + copy(batchCopy, batch) + go u.uploadBatch(batchCopy) + batch = batch[:0] + flushTimer.Reset(1 * time.Second) + } + case <-flushTimer.C: + if len(batch) > 0 { + batchCopy := make([]*T, len(batch)) + copy(batchCopy, batch) + go u.uploadBatch(batchCopy) + batch = batch[:0] + } + flushTimer.Reset(1 * time.Second) + } + } +} + +func (u *Uploader[T]) uploadBatch(batch []*T) { + switch u.uploadMode { + case UploadModeDiagnostic: + u.uploadDiagnosticBatch(batch) + case UploadModeLog: + u.uploadLogBatch(batch) + } +} + +// there's no need to do endpoint discovery, we can just hardcode the URLs +// it's guaranteed that if datadog-agent has Go DI it will also have the proxy upload endpoints + +func (u *Uploader[T]) uploadLogBatch(batch []*T) { + // TODO: find out if there are more efficient ways of sending logs to the backend + // this is the way all other DI runtimes upload data + url := fmt.Sprintf("http://%s:8126/debugger/v1/input", getAgentHost()) + body, _ := json.Marshal(batch) + req, err := http.NewRequest("POST", url, bytes.NewReader(body)) + if err != nil { + log.Info("Failed to build request", err) + return + } + req.Header.Set("Content-Type", "application/json") + + resp, err := u.client.Do(req) + if err != nil { + log.Info("Error uploading log batch", err) + return + } + defer resp.Body.Close() + log.Info("HTTP", resp.StatusCode, url) +} + +func (u *Uploader[T]) uploadDiagnosticBatch(batch []*T) { + url := fmt.Sprintf("http://%s:8126/debugger/v1/diagnostics", getAgentHost()) + + // Create a buffer to hold the multipart form data + var b bytes.Buffer + w := multipart.NewWriter(&b) + + diagnosticJSON, err := json.Marshal(batch) + if err != nil { + log.Info("Failed to marshal diagnostic batch", err, batch) + return + } + + header := make(textproto.MIMEHeader) + header.Set("Content-Disposition", `form-data; name="event"; filename="event.json"`) + header.Set("Content-Type", "application/json") + fw, err := w.CreatePart(header) + if err != nil { + log.Info("Failed to create form file", err) + return + } + + // Write the JSON data to the form-data part + if _, err = fw.Write(diagnosticJSON); err != nil { + log.Info("Failed to write data to form file", err) + return + } + + // Close the multipart writer, otherwise the request will be missing the terminating boundary. 
+ w.Close() + + // Create a new request + req, err := http.NewRequest("POST", url, &b) + if err != nil { + log.Info("Failed to build request", err) + return + } + + // Set the content type to multipart/form-data and include the boundary + req.Header.Set("Content-Type", w.FormDataContentType()) + resp, err := u.client.Do(req) + if err != nil { + log.Info("Error uploading diagnostic batch", err) + return + } + defer resp.Body.Close() + + log.Info("HTTP", resp.StatusCode, url) +} + +func getAgentHost() string { + ddAgentHost := os.Getenv("DD_AGENT_HOST") + if ddAgentHost == "" { + ddAgentHost = "localhost" + } + return ddAgentHost +} diff --git a/pkg/dynamicinstrumentation/util/file_watcher.go b/pkg/dynamicinstrumentation/util/file_watcher.go new file mode 100644 index 0000000000000..ea05116ea4b24 --- /dev/null +++ b/pkg/dynamicinstrumentation/util/file_watcher.go @@ -0,0 +1,61 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package util + +import ( + "os" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// FileWatcher is used to track updates to a particular filepath +type FileWatcher struct { + filePath string +} + +// NewFileWatcher creates a FileWatcher to track updates to a specified file +func NewFileWatcher(filePath string) *FileWatcher { + return &FileWatcher{filePath: filePath} +} + +func (fw *FileWatcher) readFile() ([]byte, error) { + content, err := os.ReadFile(fw.filePath) + if err != nil { + return nil, err + } + return content, nil +} + +// Watch watches the target file for changes and returns a channel that will receive +// the file's content whenever it changes. +// The initial implementation used fsnotify, but this was losing update events when running +// e2e tests - this simpler implementation behaves as expected, even if it's less efficient. +// Since this is meant to be used only for testing and development, it's fine to keep this +// implementation. +func (fw *FileWatcher) Watch() (<-chan []byte, error) { + updateChan := make(chan []byte) + prevContent := []byte{} + ticker := time.NewTicker(100 * time.Millisecond) + go func() { + defer close(updateChan) + for range ticker.C { + content, err := fw.readFile() + if err != nil { + log.Infof("Error reading file %s: %s", fw.filePath, err) + return + } + if len(content) > 0 && string(content) != string(prevContent) { + prevContent = content + updateChan <- content + } + } + }() + + return updateChan, nil +} diff --git a/pkg/dynamicinstrumentation/util/file_watcher_test.go b/pkg/dynamicinstrumentation/util/file_watcher_test.go new file mode 100644 index 0000000000000..894395b9f61c0 --- /dev/null +++ b/pkg/dynamicinstrumentation/util/file_watcher_test.go @@ -0,0 +1,110 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package util + +import ( + "io/fs" + "os" + "path/filepath" + "testing" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/stretchr/testify/assert" +) + +func TestFileWatcherMultipleFiles(t *testing.T) { + // create two temporary files + f1, _ := os.CreateTemp("", "file-watcher-test-") + f2, _ := os.CreateTemp("", "file-watcher-test-") + defer f1.Close() + defer f2.Close() + defer os.Remove(f1.Name()) + defer os.Remove(f2.Name()) + + // get the absolute path for both files + fp1, _ := filepath.Abs(f1.Name()) + fp2, _ := filepath.Abs(f2.Name()) + + // initialize file contents + os.WriteFile(fp1, []byte("This is file 1"), fs.ModeAppend) + os.WriteFile(fp2, []byte("This is file 2"), fs.ModeAppend) + + // initialize file watchers + fw1 := NewFileWatcher(fp1) + fw2 := NewFileWatcher(fp2) + + ch1, err := fw1.Watch() + assert.NoError(t, err) + ch2, err := fw2.Watch() + assert.NoError(t, err) + + fc1 := <-ch1 + assert.Equal(t, "This is file 1", string(fc1)) + fc2 := <-ch2 + assert.Equal(t, "This is file 2", string(fc2)) + + os.WriteFile(fp1, []byte("Updated file 1"), fs.ModeAppend) + os.WriteFile(fp2, []byte("Updated file 2"), fs.ModeAppend) + + fc1 = <-ch1 + assert.Equal(t, "Updated file 1", string(fc1)) + fc2 = <-ch2 + assert.Equal(t, "Updated file 2", string(fc2)) +} + +func TestFileWatcherDeletedFile(t *testing.T) { + timeout := time.After(1 * time.Second) + done := make(chan bool) + go func() { + f, _ := os.CreateTemp("", "file-watcher-delete-test-") + defer f.Close() + defer os.Remove(f.Name()) + + fp, _ := filepath.Abs(f.Name()) + os.WriteFile(fp, []byte("Initial"), fs.ModeAppend) + + info, err := os.Stat(f.Name()) + if err != nil { + panic(err) + } + m := info.Mode() + + fw := NewFileWatcher(fp) + ch, err := fw.Watch() + assert.NoError(t, err) + + fc := <-ch + assert.Equal(t, "Initial", string(fc)) + + // delete file and check that we are still receiving updates + os.Remove(f.Name()) + os.WriteFile(fp, []byte("Updated"), fs.ModeAppend) + err = os.Chmod(fp, m) + assert.NoError(t, err) + + info, err = os.Stat(f.Name()) + if err != nil { + panic(err) + } + m = info.Mode() + log.Info(m) + + fc, ok := <-ch + assert.True(t, ok, "expected channel to be open") + assert.Equal(t, "Updated", string(fc), "expected to receive new file contents on channel") + done <- true + }() + + select { + case <-timeout: + t.Fatal("Timeout exceeded") + case <-done: + } +} diff --git a/pkg/ebpf/bytecode/runtime/.gitignore b/pkg/ebpf/bytecode/runtime/.gitignore index a4383358ec72f..520bd681081be 100644 --- a/pkg/ebpf/bytecode/runtime/.gitignore +++ b/pkg/ebpf/bytecode/runtime/.gitignore @@ -1 +1,2 @@ *.d +dynamicinstrumentation.go diff --git a/pkg/ebpf/bytecode/runtime/asset.go b/pkg/ebpf/bytecode/runtime/asset.go index 2d0812368b5b2..0c0add2da5702 100644 --- a/pkg/ebpf/bytecode/runtime/asset.go +++ b/pkg/ebpf/bytecode/runtime/asset.go @@ -8,7 +8,9 @@ package runtime import ( + "bytes" "crypto/sha256" + "encoding/hex" "fmt" "io" "os" @@ -37,34 +39,59 @@ func newAsset(filename, hash string) *asset { } } +// CompileOptions are options used to compile eBPF programs at runtime +type CompileOptions struct { + // AdditionalFlags are extra flags passed to clang + AdditionalFlags []string + // ModifyCallback is a callback function that is allowed to modify the contents before compilation + ModifyCallback func(in io.Reader, out io.Writer) error + // StatsdClient is a statsd client to use for telemetry + StatsdClient statsd.ClientInterface + // UseKernelHeaders enables the 
inclusion of kernel headers from the host + UseKernelHeaders bool +} + // Compile compiles the asset to an object file, writes it to the configured output directory, and // then opens and returns the compiled output func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client statsd.ClientInterface) (CompiledOutput, error) { + return a.compile(config, CompileOptions{AdditionalFlags: additionalFlags, StatsdClient: client, UseKernelHeaders: true}) +} + +// CompileWithOptions is the same as Compile, but takes an options struct with additional choices. +func (a *asset) CompileWithOptions(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) { + return a.compile(config, opts) +} + +func (a *asset) compile(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) { log.Debugf("starting runtime compilation of %s", a.filename) start := time.Now() a.tm.compilationEnabled = true defer func() { a.tm.compilationDuration = time.Since(start) - if client != nil { - a.tm.SubmitTelemetry(a.filename, client) + if opts.StatsdClient != nil { + a.tm.SubmitTelemetry(a.filename, opts.StatsdClient) } }() - opts := kernel.HeaderOptions{ - DownloadEnabled: config.EnableKernelHeaderDownload, - Dirs: config.KernelHeadersDirs, - DownloadDir: config.KernelHeadersDownloadDir, - AptConfigDir: config.AptConfigDir, - YumReposDir: config.YumReposDir, - ZypperReposDir: config.ZypperReposDir, - } - kernelHeaders := kernel.GetKernelHeaders(opts, client) - if len(kernelHeaders) == 0 { - a.tm.compilationResult = headerFetchErr - return nil, fmt.Errorf("unable to find kernel headers") + var kernelHeaders []string + if opts.UseKernelHeaders { + headerOpts := kernel.HeaderOptions{ + DownloadEnabled: config.EnableKernelHeaderDownload, + Dirs: config.KernelHeadersDirs, + DownloadDir: config.KernelHeadersDownloadDir, + AptConfigDir: config.AptConfigDir, + YumReposDir: config.YumReposDir, + ZypperReposDir: config.ZypperReposDir, + } + kernelHeaders = kernel.GetKernelHeaders(headerOpts, opts.StatsdClient) + if len(kernelHeaders) == 0 { + a.tm.compilationResult = headerFetchErr + return nil, fmt.Errorf("unable to find kernel headers") + } } + a.tm.compilationResult = verificationError outputDir := config.RuntimeCompilerOutputDir p := filepath.Join(config.BPFDir, "runtime", a.filename) @@ -78,22 +105,61 @@ func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client st return nil, fmt.Errorf("unable to create compiler output directory %s: %w", outputDir, err) } - protectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f) + diskProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f) if err != nil { return nil, fmt.Errorf("failed to create ram backed file from %s: %w", f.Name(), err) } defer func() { - if err := protectedFile.Close(); err != nil { - log.Debugf("error closing protected file %s: %s", protectedFile.Name(), err) + if err := diskProtectedFile.Close(); err != nil { + log.Debugf("error closing protected file %s: %s", diskProtectedFile.Name(), err) } }() + protectedFile := diskProtectedFile + hash := a.hash - if err = a.verify(protectedFile); err != nil { - a.tm.compilationResult = verificationError + if err = a.verify(diskProtectedFile); err != nil { return nil, fmt.Errorf("error reading input file: %s", err) } - out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, a.hash, additionalFlags, kernelHeaders) + a.tm.compilationResult = compilationErr + if 
opts.ModifyCallback != nil { + outBuf := &bytes.Buffer{} + // seek to the start and read all of protected file contents + if _, err := diskProtectedFile.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("seek disk protected file: %w", err) + } + + // run modify callback + if err := opts.ModifyCallback(diskProtectedFile, outBuf); err != nil { + return nil, fmt.Errorf("modify callback: %w", err) + } + outReader := bytes.NewReader(outBuf.Bytes()) + + // update hash + hash, err = sha256Reader(outReader) + if err != nil { + return nil, fmt.Errorf("hash post-modification protected file: %w", err) + } + if _, err := outReader.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("seek post-modification contents: %w", err) + } + + // create new protected file with the post-modification contents + postModifyProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, hash), outputDir, outReader) + if err != nil { + return nil, fmt.Errorf("create post-modification protected file: %w", err) + } + defer func() { + if err := postModifyProtectedFile.Close(); err != nil { + log.Debugf("close post-modification protected file %s: %s", postModifyProtectedFile.Name(), err) + } + }() + + // set compilation to use post-modification contents + protectedFile = postModifyProtectedFile + } + + out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, hash, opts.AdditionalFlags, kernelHeaders) a.tm.compilationResult = result return out, err @@ -111,17 +177,24 @@ func createProtectedFile(name, runtimeDir string, source io.Reader) (ProtectedFi // verify reads the asset from the reader and verifies the content hash matches what is expected. func (a *asset) verify(source ProtectedFile) error { - h := sha256.New() - if _, err := io.Copy(h, source.Reader()); err != nil { - return fmt.Errorf("error hashing file %s: %w", source.Name(), err) + sum, err := sha256Reader(source) + if err != nil { + return fmt.Errorf("hash file %s: %w", source.Name(), err) } - if fmt.Sprintf("%x", h.Sum(nil)) != a.hash { + if sum != a.hash { return fmt.Errorf("file content hash does not match expected value") } - return nil } +func sha256Reader(r io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, r); err != nil { + return "", err + } + return hex.EncodeToString(h.Sum(nil)), nil +} + // GetTelemetry returns the compilation telemetry for this asset func (a *asset) GetTelemetry() CompilationTelemetry { return a.tm diff --git a/pkg/ebpf/bytecode/runtime/protected_file.go b/pkg/ebpf/bytecode/runtime/protected_file.go index a59224f09cf19..4e25867cc815b 100644 --- a/pkg/ebpf/bytecode/runtime/protected_file.go +++ b/pkg/ebpf/bytecode/runtime/protected_file.go @@ -20,8 +20,7 @@ import ( // ProtectedFile represents a symlink to a sealed ram-backed file type ProtectedFile interface { - Close() error - Reader() io.Reader + io.ReadSeekCloser Name() string } @@ -104,6 +103,10 @@ func (m *ramBackedFile) Name() string { return m.symlink } -func (m *ramBackedFile) Reader() io.Reader { - return m.file +func (m *ramBackedFile) Seek(offset int64, whence int) (int64, error) { + return m.file.Seek(offset, whence) +} + +func (m *ramBackedFile) Read(p []byte) (n int, err error) { + return m.file.Read(p) } diff --git a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go index 9394d168d971a..4cd657d96d26f 100644 --- a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go +++ 
b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go @@ -76,7 +76,7 @@ func compileToObjectFile(inFile, outputDir, filename, inHash string, additionalF } // RHEL platforms back-ported the __BPF_FUNC_MAPPER macro, so we can always use the dynamic method there - if kv >= kernel.VersionCode(4, 10, 0) || family == "rhel" { + if len(kernelHeaders) > 0 && (kv >= kernel.VersionCode(4, 10, 0) || family == "rhel") { var helperPath string helperPath, err = includeHelperAvailability(kernelHeaders) if err != nil { diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index f280dbe88ad73..5d8bb3ff811f8 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -25,7 +25,7 @@ func main() { // Convert []int8 to []byte in multiple generated fields from the kernel, to simplify // conversion to string; see golang.org/issue/20753 - convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm|LocalAddress|RemoteAddress)(\s+)\[(\d+)\]u?int8`) + convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm|LocalAddress|RemoteAddress|Probe_id)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) b, err = format.Source(b) diff --git a/pkg/ebpf/compiler/compiler.go b/pkg/ebpf/compiler/compiler.go index a26c90c57c4e2..1553ef5d42ac4 100644 --- a/pkg/ebpf/compiler/compiler.go +++ b/pkg/ebpf/compiler/compiler.go @@ -66,10 +66,6 @@ func kernelHeaderPaths(headerDirs []string) []string { // CompileToObjectFile compiles an eBPF program func CompileToObjectFile(inFile, outputFile string, cflags []string, headerDirs []string) error { - if len(headerDirs) == 0 { - return fmt.Errorf("unable to find kernel headers") - } - tmpIncludeDir, err := writeStdarg() if err != nil { return err diff --git a/tasks/system_probe.py b/tasks/system_probe.py index 4714474f5b564..ddb1ee61bb2a9 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -54,6 +54,7 @@ "./pkg/collector/corechecks/ebpf/...", "./pkg/collector/corechecks/servicediscovery/module/...", "./pkg/process/monitor/...", + "./pkg/dynamicinstrumentation/...", ] TEST_PACKAGES = " ".join(TEST_PACKAGES_LIST) # change `timeouts` in `test/new-e2e/system-probe/test-runner/main.go` if you change them here @@ -386,6 +387,7 @@ def ninja_runtime_compilation_files(nw: NinjaWriter, gobin): "pkg/network/tracer/connection/kprobe/compile.go": "tracer", "pkg/network/tracer/offsetguess_test.go": "offsetguess-test", "pkg/security/ebpf/compile.go": "runtime-security", + "pkg/dynamicinstrumentation/codegen/compile.go": "dynamicinstrumentation", } nw.rule( @@ -494,6 +496,7 @@ def ninja_cgo_type_files(nw: NinjaWriter): "pkg/ebpf/types.go": [ "pkg/ebpf/c/lock_contention.h", ], + "pkg/dynamicinstrumentation/ditypes/ebpf.go": ["pkg/dynamicinstrumentation/codegen/c/types.h"], } nw.rule( name="godefs", From d44450ffcdf32cf8a84fae93896015170731e2f2 Mon Sep 17 00:00:00 2001 From: Ken Schneider <103530259+ken-schneider@users.noreply.github.com> Date: Fri, 6 Sep 2024 21:42:28 -0400 Subject: [PATCH 076/128] [NETPATH-297] Update timeout to be per hop instead of per path (#29092) --- .../conf.d/network_path.d/conf.yaml.example | 16 ++--- cmd/system-probe/modules/traceroute.go | 4 +- .../corechecks/networkpath/config.go | 16 ++++- .../corechecks/networkpath/config_test.go | 67 +++++++++++++++++++ pkg/config/setup/config.go | 7 +- 
pkg/config/setup/config_test.go | 2 +- pkg/networkpath/traceroute/runner.go | 6 +- pkg/networkpath/traceroute/tcp/tcpv4.go | 8 +-- pkg/process/net/common.go | 4 +- ...imeout-configuration-4ccec24497bd1574.yaml | 14 ++++ 10 files changed, 117 insertions(+), 27 deletions(-) create mode 100644 releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml diff --git a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example index 233f284d2576d..6f3e29eed0bfd 100644 --- a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example @@ -5,11 +5,11 @@ init_config: # # min_collection_interval: 60 - ## @param timeout - integer - optional - default: 10000 - ## Specifies how much time the full traceroute should take - ## in milliseconds + ## @param timeout - integer - optional - default: 1000 + ## Specifies how much time in milliseconds the traceroute should + ## wait for a response from each hop before timing out. # - # timeout: 10000 + # timeout: 1000 # Network Path integration is used to monitor individual endpoints. # Supported platforms are Linux and Windows. macOS is not supported yet. @@ -36,11 +36,11 @@ instances: # # max_ttl: - ## @param timeout - integer - optional - default: 10000 - ## Specifies how much time the full traceroute should take - ## in milliseconds + ## @param timeout - integer - optional - default: 1000 + ## Specifies how much time in milliseconds the traceroute should + ## wait for a response from each hop before timing out. # - # timeout: 10000 + # timeout: 1000 ## @param min_collection_interval - number - optional - default: 60 ## Specifies how frequently we should probe the endpoint. diff --git a/cmd/system-probe/modules/traceroute.go b/cmd/system-probe/modules/traceroute.go index 320106472c087..6e0667e120784 100644 --- a/cmd/system-probe/modules/traceroute.go +++ b/cmd/system-probe/modules/traceroute.go @@ -98,8 +98,8 @@ func (t *traceroute) RegisterGRPC(_ grpc.ServiceRegistrar) error { func (t *traceroute) Close() {} func logTracerouteRequests(cfg tracerouteutil.Config, client string, runCount uint64, start time.Time) { - args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, runCount, time.Since(start)} - msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d (count: %d): retrieved traceroute in %s" + args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, cfg.Protocol, runCount, time.Since(start)} + msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d&protocol=%s (count: %d): retrieved traceroute in %s" switch { case runCount <= 5, runCount%20 == 0: log.Infof(msg, args...) 
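
A minimal sketch, not part of the patch, of the time budget implied by the new per-hop semantics, assuming the new defaults introduced in this commit (1000 ms per hop, max TTL of 30); the process-net client further down in this commit pads the same product with 10 s of communication overhead.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed values mirroring DefaultNetworkPathTimeout (now per hop) and
	// DefaultNetworkPathMaxTTL from this commit.
	perHopTimeout := 1000 * time.Millisecond
	maxTTL := uint8(30)

	// Worst case for one traceroute: every hop waits out its full timeout.
	worstCase := perHopTimeout * time.Duration(maxTTL) // 30s

	// HTTP budget used when querying system-probe: full traceroute plus overhead.
	httpTimeout := worstCase + 10*time.Second // 40s

	fmt.Println(worstCase, httpTimeout)
}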
diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go index c7b57f6924a6a..60b805d076c22 100644 --- a/pkg/collector/corechecks/networkpath/config.go +++ b/pkg/collector/corechecks/networkpath/config.go @@ -21,10 +21,17 @@ const ( defaultCheckInterval time.Duration = 1 * time.Minute ) +// Number is a type that is used to make a generic version +// of the firstNonZero function +type Number interface { + ~int | ~int64 | ~uint8 +} + // InitConfig is used to deserialize integration init config type InitConfig struct { MinCollectionInterval int64 `yaml:"min_collection_interval"` TimeoutMs int64 `yaml:"timeout"` + MaxTTL uint8 `yaml:"max_ttl"` } // InstanceConfig is used to deserialize integration instance config @@ -83,7 +90,6 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data c.DestPort = instance.DestPort c.SourceService = instance.SourceService c.DestinationService = instance.DestinationService - c.MaxTTL = instance.MaxTTL c.Protocol = payload.Protocol(strings.ToUpper(instance.Protocol)) c.MinCollectionInterval = firstNonZero( @@ -104,13 +110,19 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data return nil, fmt.Errorf("timeout must be > 0") } + c.MaxTTL = firstNonZero( + instance.MaxTTL, + initConfig.MaxTTL, + setup.DefaultNetworkPathMaxTTL, + ) + c.Tags = instance.Tags c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace") return c, nil } -func firstNonZero(values ...time.Duration) time.Duration { +func firstNonZero[T Number](values ...T) T { for _, value := range values { if value != 0 { return value diff --git a/pkg/collector/corechecks/networkpath/config_test.go b/pkg/collector/corechecks/networkpath/config_test.go index e34be2bb11cb9..bc1999876a46d 100644 --- a/pkg/collector/corechecks/networkpath/config_test.go +++ b/pkg/collector/corechecks/networkpath/config_test.go @@ -36,6 +36,7 @@ hostname: 1.2.3.4 MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -71,6 +72,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -86,6 +88,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(10) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -98,6 +101,7 @@ hostname: 1.2.3.4 MinCollectionInterval: time.Duration(1) * time.Minute, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -115,6 +119,7 @@ destination_service: service-b MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -130,6 +135,7 @@ protocol: udp Namespace: "my-namespace", Protocol: payload.ProtocolUDP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -145,6 +151,7 @@ protocol: UDP Namespace: "my-namespace", Protocol: payload.ProtocolUDP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -160,6 +167,7 @@ 
protocol: TCP Namespace: "my-namespace", Protocol: payload.ProtocolTCP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -177,6 +185,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: 50000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -195,6 +204,7 @@ timeout: 70000 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: 50000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -212,6 +222,7 @@ timeout: 70000 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: 70000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -228,6 +239,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -242,6 +254,61 @@ timeout: -1 `), expectedError: "timeout must be > 0", }, + { + name: "maxTTL from instance config", + rawInstance: []byte(` +hostname: 1.2.3.4 +max_ttl: 50 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 50, + }, + }, + { + name: "maxTTL from instance config preferred over init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +max_ttl: 50 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +max_ttl: 64 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 50, + }, + }, + { + name: "maxTTL from init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +max_ttl: 64 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 64, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index fd4555ba702ba..4cf537291aae8 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -105,7 +105,10 @@ const ( DefaultMaxMessageSizeBytes = 256 * 1000 // DefaultNetworkPathTimeout defines the default timeout for a network path test - DefaultNetworkPathTimeout = 10000 + DefaultNetworkPathTimeout = 1000 + + // DefaultNetworkPathMaxTTL defines the default maximum TTL for traceroute tests + DefaultNetworkPathMaxTTL = 30 ) // datadog is the global configuration object @@ -437,7 +440,7 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("network_path.connections_monitoring.enabled", false) config.BindEnvAndSetDefault("network_path.collector.workers", 4) config.BindEnvAndSetDefault("network_path.collector.timeout", DefaultNetworkPathTimeout) - config.BindEnvAndSetDefault("network_path.collector.max_ttl", 30) + config.BindEnvAndSetDefault("network_path.collector.max_ttl", DefaultNetworkPathMaxTTL) 
config.BindEnvAndSetDefault("network_path.collector.input_chan_size", 1000) config.BindEnvAndSetDefault("network_path.collector.processing_chan_size", 1000) config.BindEnvAndSetDefault("network_path.collector.pathtest_contexts_limit", 10000) diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go index d2b59e285c2a0..2325bfe23afbb 100644 --- a/pkg/config/setup/config_test.go +++ b/pkg/config/setup/config_test.go @@ -665,7 +665,7 @@ func TestNetworkPathDefaults(t *testing.T) { assert.Equal(t, false, config.GetBool("network_path.connections_monitoring.enabled")) assert.Equal(t, 4, config.GetInt("network_path.collector.workers")) - assert.Equal(t, 10000, config.GetInt("network_path.collector.timeout")) + assert.Equal(t, 1000, config.GetInt("network_path.collector.timeout")) assert.Equal(t, 30, config.GetInt("network_path.collector.max_ttl")) assert.Equal(t, 1000, config.GetInt("network_path.collector.input_chan_size")) assert.Equal(t, 1000, config.GetInt("network_path.collector.processing_chan_size")) diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go index cf57bb40f73d0..2e0fabbc88eec 100644 --- a/pkg/networkpath/traceroute/runner.go +++ b/pkg/networkpath/traceroute/runner.go @@ -42,8 +42,6 @@ const ( DefaultNumPaths = 1 // DefaultMinTTL defines the default minimum TTL DefaultMinTTL = 1 - // DefaultMaxTTL defines the default maximum TTL - DefaultMaxTTL = 30 // DefaultDelay defines the default delay DefaultDelay = 50 //msec // DefaultOutputFormat defines the default output format @@ -117,12 +115,12 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.Network maxTTL := cfg.MaxTTL if maxTTL == 0 { - maxTTL = DefaultMaxTTL + maxTTL = setup.DefaultNetworkPathMaxTTL } var timeout time.Duration if cfg.Timeout == 0 { - timeout = setup.DefaultNetworkPathTimeout * time.Millisecond + timeout = setup.DefaultNetworkPathTimeout * time.Duration(maxTTL) * time.Millisecond } else { timeout = cfg.Timeout } diff --git a/pkg/networkpath/traceroute/tcp/tcpv4.go b/pkg/networkpath/traceroute/tcp/tcpv4.go index 6b5e9b8db94a0..23f3c45950689 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4.go @@ -103,15 +103,9 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { // hops should be of length # of hops hops := make([]*Hop, 0, t.MaxTTL-t.MinTTL) - // TODO: better logic around timeout for sequential is needed - // right now we're just hacking around the existing - // need to convert uint8 to int for proper conversion to - // time.Duration - timeout := t.Timeout / time.Duration(int(t.MaxTTL-t.MinTTL)) - for i := int(t.MinTTL); i <= int(t.MaxTTL); i++ { seqNumber := rand.Uint32() - hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, timeout) + hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, t.Timeout) if err != nil { return nil, fmt.Errorf("failed to run traceroute: %w", err) } diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go index 640c3e82dadd2..2480dc1d69f84 100644 --- a/pkg/process/net/common.go +++ b/pkg/process/net/common.go @@ -227,7 +227,9 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in // GetTraceroute returns the results of a traceroute to a host func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout time.Duration) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), 
timeout+10*time.Second) // allow extra time for the system probe communication overhead + httpTimeout := timeout*time.Duration(maxTTL) + 10*time.Second // allow extra time for the system probe communication overhead, calculate full timeout for TCP traceroute + log.Tracef("Network Path traceroute HTTP request timeout: %s", httpTimeout) + ctx, cancel := context.WithTimeout(context.Background(), httpTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil) diff --git a/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml new file mode 100644 index 0000000000000..79170d87fd8cb --- /dev/null +++ b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +upgrade: + - | + Changes behavior of the timeout for Network Path. Previously, the timeout + signified the total time to wait for a full traceroute to complete. Now, + the timeout signifies the time to wait for each hop in the traceroute. + Additionally, the default timeout has been changed to 1000ms. From ef6b6bde18249afb9824769bb6b5020e3a4e95e5 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Mon, 9 Sep 2024 10:34:05 +0200 Subject: [PATCH 077/128] [release] Update current milestone to 7.59.0 (#29131) --- release.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.json b/release.json index 43d540aa6b09e..c04669e36a18a 100644 --- a/release.json +++ b/release.json @@ -1,6 +1,6 @@ { "base_branch": "main", - "current_milestone": "7.58.0", + "current_milestone": "7.59.0", "last_stable": { "6": "6.53.0", "7": "7.56.2" From 18081d5f9b9d3ba2264b571ae59acc1028bf2a71 Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Mon, 9 Sep 2024 11:41:48 +0200 Subject: [PATCH 078/128] Fix typo on ownerhsip (#29103) Co-authored-by: Nicolas Schweitzer --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 62262edfd9967..c05ff3a97e27b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -98,7 +98,7 @@ /.gitlab/common/test_infra_version.yml @DataDog/agent-devx-loops @DataDog/agent-devx-infra /.gitlab/e2e/e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops -/.gitlab/e2e/k8s_e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops +/.gitlab/e2e_k8s/e2e_k8s.yml @DataDog/container-integrations @DataDog/agent-devx-loops /.gitlab/e2e/install_packages @DataDog/agent-delivery /.gitlab/container_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /.gitlab/binary_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops From d1e91253eb08b8346587751f223e0cb23b880cd0 Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Mon, 9 Sep 2024 12:27:06 +0200 Subject: [PATCH 079/128] [CWS] discard the last good parent inode (#29118) --- pkg/security/probe/discarders_linux.go 
| 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index 8adc92a9737a5..30b4333d1995b 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -399,13 +399,14 @@ func (id *inodeDiscarders) discardParentInode(req *erpc.Request, rs *rules.RuleS parentKey := pathKey for i := 0; i < discarderDepth; i++ { - parentKey, err = id.dentryResolver.GetParent(parentKey) + key, err := id.dentryResolver.GetParent(parentKey) if err != nil || dentry.IsFakeInode(pathKey.Inode) { if i == 0 { return false, 0, 0, err } break } + parentKey = key } // do not insert multiple time the same discarder From 372bd5315bf9191249a06e8546c919621af1cd4e Mon Sep 17 00:00:00 2001 From: Adel Haj Hassan <41540817+adel121@users.noreply.github.com> Date: Mon, 9 Sep 2024 13:37:13 +0200 Subject: [PATCH 080/128] remove unnecessary println (#29129) --- comp/core/tagger/taggerimpl/tagstore/tagstore_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go index 9e64b8f314e0b..eb2e21ac3ee01 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go @@ -6,7 +6,6 @@ package tagstore import ( - "fmt" "sync" "testing" "time" @@ -39,7 +38,6 @@ func (s *StoreTestSuite) SetupTest() { s.clock.Add(time.Since(time.Unix(0, 0))) mockConfig := configmock.New(s.T()) - fmt.Println("New Checkpoint: ", mockConfig) s.tagstore = newTagStoreWithClock(mockConfig, s.clock, telemetryStore) } From f5b57294b3760250e154ab84ccb747632270b0c8 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Mon, 9 Sep 2024 13:39:03 +0200 Subject: [PATCH 081/128] fix(release): Set milestone as non-devel tag (#29134) --- tasks/release.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tasks/release.py b/tasks/release.py index 4132df03ec59d..8668e9f3a8b50 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -598,6 +598,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_versions="6,7", up current = current_version(ctx, max(list_major_versions)) next = current.next_version(bump_minor=True) current.rc = False + current.devel = False next.devel = False # Strings with proper branch/tag names From e9cee3d3b15dc05282449539fcc8e9c91c4e04e1 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Mon, 9 Sep 2024 15:21:09 +0300 Subject: [PATCH 082/128] Revert "[USMON-417] gotls: Restore support on fedora (#27437)" (#29128) --- .../ebpf/c/protocols/tls/go-tls-maps.h | 4 - pkg/network/ebpf/c/protocols/tls/https.h | 46 +++++++++--- .../protocols/tls/gotls/testutil/helpers.go | 27 ++++++- pkg/network/usm/ebpf_gotls.go | 73 +++++-------------- pkg/network/usm/kafka_monitor_test.go | 6 +- pkg/network/usm/monitor_tls_test.go | 12 +-- pkg/network/usm/postgres_monitor_test.go | 2 +- .../usm/tests/tracer_usm_linux_test.go | 4 +- pkg/network/usm/usm_grpc_monitor_test.go | 2 +- pkg/network/usm/usm_http2_monitor_test.go | 2 +- 10 files changed, 95 insertions(+), 83 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h index 58f3ce3ac771f..1c66dee1d0abc 100644 --- a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h +++ b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h @@ -9,10 +9,6 @@ // offsets_data map contains the information about the locations of structs in the inspected binary, mapped by the binary's 
inode number. BPF_HASH_MAP(offsets_data, go_tls_offsets_data_key_t, tls_offsets_data_t, 1024) -// Maps PID to the - tuple, that is used to find the offsets_data map for the binary. -// Size is a 10 times the size of the offsets_data map, to have enough space for all the binaries. -BPF_HASH_MAP(pid_to_device_inode, u32, go_tls_offsets_data_key_t, 10240) - /* go_tls_read_args is used to get the read function info when running in the read-return uprobe. The key contains the go routine id and the pid. */ BPF_LRU_MAP(go_tls_read_args, go_tls_function_args_key_t, go_tls_read_args_data_t, 2048) diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index ccb9550f3602c..db485ab14403c 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -3,6 +3,10 @@ #ifdef COMPILE_CORE #include "ktypes.h" +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #else #include #include @@ -283,19 +287,41 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) { bpf_map_update_with_telemetry(ssl_sock_by_ctx, &ssl_ctx, &ssl_sock, BPF_ANY); } - -// Retrieves the result of binary analysis for the current task binary's inode number. -// For the current PID, we retrieve the inode number of the binary and then we look up the binary's analysis result. +/** + * get_offsets_data retrieves the result of binary analysis for the + * current task binary's inode number. + */ static __always_inline tls_offsets_data_t* get_offsets_data() { - u64 pid_tgid = bpf_get_current_pid_tgid(); - u32 pid = pid_tgid >> 32; - go_tls_offsets_data_key_t *key = bpf_map_lookup_elem(&pid_to_device_inode, &pid); - if (key == NULL) { - log_debug("get_offsets_data: could not find key for pid %u", pid); + struct task_struct *t = (struct task_struct *) bpf_get_current_task(); + struct inode *inode; + go_tls_offsets_data_key_t key; + dev_t dev_id; + + inode = BPF_CORE_READ(t, mm, exe_file, f_inode); + if (!inode) { + log_debug("get_offsets_data: could not read f_inode field"); + return NULL; + } + + int err; + err = BPF_CORE_READ_INTO(&key.ino, inode, i_ino); + if (err) { + log_debug("get_offsets_data: could not read i_ino field"); return NULL; } - go_tls_offsets_data_key_t key_copy = *key; - return bpf_map_lookup_elem(&offsets_data, &key_copy); + + err = BPF_CORE_READ_INTO(&dev_id, inode, i_sb, s_dev); + if (err) { + log_debug("get_offsets_data: could not read s_dev field"); + return NULL; + } + + key.device_id_major = MAJOR(dev_id); + key.device_id_minor = MINOR(dev_id); + + log_debug("get_offsets_data: task binary inode number: %llu; device ID %x:%x", key.ino, key.device_id_major, key.device_id_minor); + + return bpf_map_lookup_elem(&offsets_data, &key); } #endif diff --git a/pkg/network/protocols/tls/gotls/testutil/helpers.go b/pkg/network/protocols/tls/gotls/testutil/helpers.go index fcf418ae505a9..2315779aa184a 100644 --- a/pkg/network/protocols/tls/gotls/testutil/helpers.go +++ b/pkg/network/protocols/tls/gotls/testutil/helpers.go @@ -8,11 +8,34 @@ package testutil import ( + "slices" + "testing" + + "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/network/config" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +const ( + fedoraPlatform = "fedora" ) +var fedoraUnsupportedVersions = []string{"35", "36", "37", "38"} + 
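
For context, a short sketch of how the restored test-only gate is consumed; it mirrors the call sites updated later in this commit and is not additional patch content (the test name and body are hypothetical, and the testing/config/gotlsutils imports are assumed as in the USM test files).

func TestOverGoTLS(t *testing.T) {
	cfg := config.New()
	if !gotlsutils.GoTLSSupported(t, cfg) {
		t.Skip("GoTLS not supported for this setup")
	}
	// ... exercise the Go-TLS uprobes against cfg ...
}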
+// isFedora returns true if the current OS is Fedora. +// go-tls does not work correctly on Fedora 35, 36, 37 and 38. +func isFedora(t *testing.T) bool { + platform, err := kernel.Platform() + require.NoError(t, err) + platformVersion, err := kernel.PlatformVersion() + require.NoError(t, err) + + return platform == fedoraPlatform && slices.Contains(fedoraUnsupportedVersions, platformVersion) +} + // GoTLSSupported returns true if GO-TLS monitoring is supported on the current OS. -func GoTLSSupported(cfg *config.Config) bool { - return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) +func GoTLSSupported(t *testing.T, cfg *config.Config) bool { + return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) && !isFedora(t) } diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index b7e75f7c3c964..e0090e3ba889c 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -42,7 +42,6 @@ import ( const ( offsetsDataMap = "offsets_data" - pidToDeviceInodeMap = "pid_to_device_inode" goTLSReadArgsMap = "go_tls_read_args" goTLSWriteArgsMap = "go_tls_write_args" connectionTupleByGoTLSMap = "conn_tup_by_go_tls_conn" @@ -116,11 +115,6 @@ type goTLSProgram struct { // inodes. offsetsDataMap *ebpf.Map - // eBPF map holding the mapping of PIDs to device/inode numbers. - // On some filesystems (like btrfs), the device-id in the task-struct can be different from the device-id extracted - // in the user-mode. This map is used to ensure the eBPF probes are getting the correct device/inode numbers. - pidToDeviceInodeMap *ebpf.Map - // binAnalysisMetric handles telemetry on the time spent doing binary // analysis binAnalysisMetric *libtelemetry.Counter @@ -137,7 +131,6 @@ var _ utils.Attacher = &goTLSProgram{} var goTLSSpec = &protocols.ProtocolSpec{ Maps: []*manager.Map{ {Name: offsetsDataMap}, - {Name: pidToDeviceInodeMap}, {Name: goTLSReadArgsMap}, {Name: goTLSWriteArgsMap}, {Name: connectionTupleByGoTLSMap}, @@ -223,10 +216,6 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { if err != nil { return fmt.Errorf("could not get offsets_data map: %s", err) } - p.pidToDeviceInodeMap, _, err = m.GetMap(pidToDeviceInodeMap) - if err != nil { - return fmt.Errorf("could not get %s map: %s", pidToDeviceInodeMap, err) - } procMonitor := monitor.GetProcessMonitor() cleanupExec := procMonitor.SubscribeExec(p.handleProcessStart) @@ -253,7 +242,7 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { processSet := p.registry.GetRegisteredProcesses() deletedPids := monitor.FindDeletedProcesses(processSet) for deletedPid := range deletedPids { - _ = p.DetachPID(deletedPid) + _ = p.registry.Unregister(deletedPid) } } } @@ -289,7 +278,6 @@ var ( // DetachPID detaches the provided PID from the eBPF program. 
func (p *goTLSProgram) DetachPID(pid uint32) error { - _ = p.pidToDeviceInodeMap.Delete(unsafe.Pointer(&pid)) return p.registry.Unregister(pid) } @@ -350,13 +338,12 @@ func (p *goTLSProgram) AttachPID(pid uint32) error { // Check go process probeList := make([]manager.ProbeIdentificationPair, 0) - return p.registry.Register(binPath, pid, - registerCBCreator(p.manager, p.offsetsDataMap, p.pidToDeviceInodeMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), - unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap, p.pidToDeviceInodeMap), - alreadyCBCreator(p.pidToDeviceInodeMap)) + return p.registry.Register(binPath, pid, registerCBCreator(p.manager, p.offsetsDataMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), + unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap), + utils.IgnoreCB) } -func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { +func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { return func(filePath utils.FilePath) error { start := time.Now() @@ -379,13 +366,13 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap return fmt.Errorf("error extracting inspectoin data from %s: %w", filePath.HostPath, err) } - if err := addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap, filePath, inspectionResult); err != nil { + if err := addInspectionResultToMap(offsetsDataMap, filePath.ID, inspectionResult); err != nil { return fmt.Errorf("failed adding inspection rules: %w", err) } pIDs, err := attachHooks(mgr, inspectionResult, filePath.HostPath, filePath.ID) if err != nil { - removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, filePath) + removeInspectionResultFromMap(offsetsDataMap, filePath.ID) return fmt.Errorf("error while attaching hooks to %s: %w", filePath.HostPath, err) } *probeIDs = pIDs @@ -398,21 +385,6 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap } } -// alreadyCBCreator handles the case where a binary is already registered. In such a case the registry callback won't -// be called, so we need to add a mapping from the PID to the device/inode of the binary. -func alreadyCBCreator(pidToDeviceInodeMap *ebpf.Map) func(utils.FilePath) error { - return func(filePath utils.FilePath) error { - if filePath.PID == 0 { - return nil - } - return pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(&gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, - })) - } -} - func (p *goTLSProgram) handleProcessExit(pid pid) { _ = p.DetachPID(pid) } @@ -423,39 +395,32 @@ func (p *goTLSProgram) handleProcessStart(pid pid) { // addInspectionResultToMap runs a binary inspection and adds the result to the // map that's being read by the probes, indexed by the binary's inode number `ino`. 
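// Illustrative sketch (not part of this patch): how user space can derive the
// same (device major, device minor, inode) key that the eBPF side now builds
// from i_sb->s_dev and i_ino. It assumes golang.org/x/sys/unix; localBinaryID
// below merely mirrors the generated gotls.TlsBinaryId type for the example.
// Note that the two encodings differ: unix.Major/Minor decode the user-space
// dev_t returned by stat(2), while the MAJOR/MINOR macros added in https.h
// decode the kernel-internal dev_t (12-bit major, 20-bit minor; e.g. a kernel
// dev_t of 0x00800010 splits into major 8, minor 16). Both sides end up with
// the same (major, minor) pair, which is what lets offsets_data be keyed per
// binary instead of per PID.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

type localBinaryID struct {
	Major, Minor uint32
	Ino          uint64
}

// keyForBinary stats the binary on disk and splits its device ID the same way
// registerCBCreator does when it populates the offsets_data map.
func keyForBinary(path string) (localBinaryID, error) {
	var st unix.Stat_t
	if err := unix.Stat(path, &st); err != nil {
		return localBinaryID{}, err
	}
	return localBinaryID{
		Major: unix.Major(st.Dev),
		Minor: unix.Minor(st.Dev),
		Ino:   st.Ino,
	}, nil
}

func main() {
	key, err := keyForBinary("/proc/self/exe")
	if err != nil {
		panic(err)
	}
	fmt.Printf("offsets_data key: device %d:%d, inode %d\n", key.Major, key.Minor, key.Ino)
}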
-func addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath, result *bininspect.Result) error { +func addInspectionResultToMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier, result *bininspect.Result) error { offsetsData, err := inspectionResultToProbeData(result) if err != nil { return fmt.Errorf("error while parsing inspection result: %w", err) } key := &gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, + Id_major: unix.Major(binID.Dev), + Id_minor: unix.Minor(binID.Dev), + Ino: binID.Inode, } if err := offsetsDataMap.Put(unsafe.Pointer(key), unsafe.Pointer(&offsetsData)); err != nil { - return fmt.Errorf("could not write binary inspection result to map for binID %v (pid %v): %w", filePath.ID, filePath.PID, err) + return fmt.Errorf("could not write binary inspection result to map for binID %v: %w", binID, err) } - if err := pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(key)); err != nil { - return fmt.Errorf("could not write pid to device/inode (%s) map for pid %v: %w", filePath.ID.String(), filePath.PID, err) - } return nil } -func removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath) { +func removeInspectionResultFromMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier) { key := &gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, - } - if filePath.PID != 0 { - _ = pidToDeviceInodeMap.Delete(unsafe.Pointer(&filePath.PID)) + Id_major: unix.Major(binID.Dev), + Id_minor: unix.Minor(binID.Dev), + Ino: binID.Inode, } if err := offsetsDataMap.Delete(unsafe.Pointer(key)); err != nil { - log.Errorf("could not remove inspection result from map for ino %v: %s", filePath.ID, err) - return + log.Errorf("could not remove inspection result from map for ino %v: %s", binID, err) } } @@ -510,12 +475,12 @@ func attachHooks(mgr *manager.Manager, result *bininspect.Result, binPath string return probeIDs, nil } -func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map) func(path utils.FilePath) error { +func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap *ebpf.Map) func(path utils.FilePath) error { return func(path utils.FilePath) error { if len(*probeIDs) == 0 { return nil } - removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, path) + removeInspectionResultFromMap(offsetsDataMap, path.ID) for _, probeID := range *probeIDs { err := mgr.DetachHook(probeID) if err != nil { diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index c5ac048be3a80..bbcedaecb2be6 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -148,7 +148,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProtocolParsing() { for mode, name := range map[bool]string{false: "without TLS", true: "with TLS"} { t.Run(name, func(t *testing.T) { - if mode && !gotlsutils.GoTLSSupported(config.New()) { + if mode && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } for _, version := range versions { @@ -1244,7 +1244,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaFetchRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(config.New()) { + if !gotlsutils.GoTLSSupported(t, 
config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -1470,7 +1470,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(config.New()) { + if !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index b9df53c92dd16..c706b5fe874d4 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -548,9 +548,11 @@ func (s *tlsSuite) TestJavaInjection() { } func TestHTTPGoTLSAttachProbes(t *testing.T) { + t.Skip("skipping GoTLS tests while we investigate their flakiness") + modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -569,7 +571,7 @@ func testHTTP2GoTLSAttachProbes(t *testing.T, cfg *config.Config) { if !http2.Supported() { t.Skip("HTTP2 not supported for this setup") } - if !gotlstestutil.GoTLSSupported(cfg) { + if !gotlstestutil.GoTLSSupported(t, cfg) { t.Skip("GoTLS not supported for this setup") } @@ -601,7 +603,7 @@ func TestHTTPSGoTLSAttachProbesOnContainer(t *testing.T) { t.Skip("Skipping a flaky test") modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -619,7 +621,7 @@ func TestOldConnectionRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -694,7 +696,7 @@ func TestOldConnectionRegression(t *testing.T) { func TestLimitListenerRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index 47a891cd4040b..a74ef203f3721 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -137,7 +137,7 @@ func (s *postgresProtocolParsingSuite) TestDecoding() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.isTLS && !gotlstestutil.GoTLSSupported(config.New()) { + if tt.isTLS && !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } testDecoding(t, tt.isTLS) diff --git a/pkg/network/usm/tests/tracer_usm_linux_test.go b/pkg/network/usm/tests/tracer_usm_linux_test.go index 9399e6b1cb210..2da79eadbf80a 100644 --- a/pkg/network/usm/tests/tracer_usm_linux_test.go +++ b/pkg/network/usm/tests/tracer_usm_linux_test.go @@ -118,7 +118,7 @@ func skipIfUsingNAT(t *testing.T, ctx testContext) { // skipIfGoTLSNotSupported skips the test if GoTLS is not supported. 
func skipIfGoTLSNotSupported(t *testing.T, _ testContext) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS is not supported") } } @@ -183,7 +183,7 @@ func (s *USMSuite) TestProtocolClassification() { cfg.EnableNativeTLSMonitoring = true cfg.EnableHTTPMonitoring = true cfg.EnablePostgresMonitoring = true - cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(cfg) + cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(t, cfg) cfg.BypassEnabled = true tr, err := tracer.NewTracer(cfg, nil) require.NoError(t, err) diff --git a/pkg/network/usm/usm_grpc_monitor_test.go b/pkg/network/usm/usm_grpc_monitor_test.go index 73e3a5de28f2e..35ae34a9d46d4 100644 --- a/pkg/network/usm/usm_grpc_monitor_test.go +++ b/pkg/network/usm/usm_grpc_monitor_test.go @@ -71,7 +71,7 @@ func TestGRPCScenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmGRPCSuite{isTLS: tc.isTLS}) diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index 825c842fe68cd..46c3188b09833 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -108,7 +108,7 @@ func TestHTTP2Scenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmHTTP2Suite{isTLS: tc.isTLS}) From ba54a8d16c6cd32e2918fd6f37359bb5c5b57983 Mon Sep 17 00:00:00 2001 From: Branden Clark Date: Mon, 9 Sep 2024 10:23:25 -0400 Subject: [PATCH 083/128] add fetch-driver-msm invoke task (#28940) --- tasks/libs/common/utils.py | 36 +++++++++++++--- tasks/msi.py | 87 +++++++++++++++++++++++++++++++++++++- 2 files changed, 117 insertions(+), 6 deletions(-) diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index f2de7730ed88d..8b2e04244bb52 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -8,6 +8,7 @@ import platform import re import sys +import tempfile import time import traceback from collections import Counter @@ -18,15 +19,12 @@ from subprocess import CalledProcessError, check_output from types import SimpleNamespace +import requests from invoke.context import Context from invoke.exceptions import Exit from tasks.libs.common.color import Color, color_message -from tasks.libs.common.constants import ( - ALLOWED_REPO_ALL_BRANCHES, - DEFAULT_BRANCH, - REPO_PATH, -) +from tasks.libs.common.constants import ALLOWED_REPO_ALL_BRANCHES, DEFAULT_BRANCH, REPO_PATH from tasks.libs.common.git import get_commit_sha from tasks.libs.owners.parsing import search_owners from tasks.libs.releasing.version import get_version @@ -699,3 +697,31 @@ def team_to_label(team): 'asm-go': "agent-security", } return dico.get(team, team) + + +@contextmanager +def download_to_tempfile(url, checksum=None): + """ + Download a file from @url to a temporary file and yields the path. + + The temporary file is removed when the context manager exits. 
+ + if @checksum is provided it will be updated with each chunk of the file + """ + fd, tmp_path = tempfile.mkstemp() + try: + with requests.get(url, stream=True) as r: + r.raise_for_status() + with os.fdopen(fd, "wb") as f: + # fd will be closed by context manager, so we no longer need it + fd = None + for chunk in r.iter_content(chunk_size=8192): + if checksum: + checksum.update(chunk) + f.write(chunk) + yield tmp_path + finally: + if fd is not None: + os.close(fd) + if os.path.exists(tmp_path): + os.remove(tmp_path) diff --git a/tasks/msi.py b/tasks/msi.py index 5574f67debb98..ea838f27817c5 100644 --- a/tasks/msi.py +++ b/tasks/msi.py @@ -2,6 +2,7 @@ msi namespaced tasks """ +import hashlib import mmap import os import shutil @@ -11,7 +12,7 @@ from invoke import task from invoke.exceptions import Exit, UnexpectedExit -from tasks.libs.common.utils import timed +from tasks.libs.common.utils import download_to_tempfile, timed from tasks.libs.releasing.version import get_version, load_release_versions # Windows only import @@ -29,6 +30,8 @@ BUILD_ROOT_DIR = os.path.join('C:\\', "dev", "msi", "DatadogAgentInstaller") BUILD_SOURCE_DIR = os.path.join(BUILD_ROOT_DIR, "src") BUILD_OUTPUT_DIR = os.path.join(BUILD_ROOT_DIR, "output") +# Match to AgentInstaller.cs BinSource +AGENT_BIN_SOURCE_DIR = os.path.join('C:\\', 'opt', 'datadog-agent', 'bin', 'agent') NUGET_PACKAGES_DIR = os.path.join(BUILD_ROOT_DIR, 'packages') NUGET_CONFIG_FILE = os.path.join(BUILD_ROOT_DIR, 'NuGet.config') @@ -433,3 +436,85 @@ def MsiClosing(obj): yield obj finally: obj.Close() + + +def get_msm_info(ctx, release_version): + """ + Get the merge module info from the release.json for the given release_version + """ + env = load_release_versions(ctx, release_version) + base_url = "https://s3.amazonaws.com/dd-windowsfilter/builds" + msm_info = {} + if 'WINDOWS_DDNPM_VERSION' in env: + info = { + 'filename': 'DDNPM.msm', + 'build': env['WINDOWS_DDNPM_DRIVER'], + 'version': env['WINDOWS_DDNPM_VERSION'], + 'shasum': env['WINDOWS_DDNPM_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddnpminstall-{info['version']}.msm" + msm_info['DDNPM'] = info + if 'WINDOWS_DDPROCMON_VERSION' in env: + info = { + 'filename': 'DDPROCMON.msm', + 'build': env['WINDOWS_DDPROCMON_DRIVER'], + 'version': env['WINDOWS_DDPROCMON_VERSION'], + 'shasum': env['WINDOWS_DDPROCMON_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddprocmoninstall-{info['version']}.msm" + msm_info['DDPROCMON'] = info + if 'WINDOWS_APMINJECT_VERSION' in env: + info = { + 'filename': 'ddapminstall.msm', + 'build': env['WINDOWS_APMINJECT_MODULE'], + 'version': env['WINDOWS_APMINJECT_VERSION'], + 'shasum': env['WINDOWS_APMINJECT_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddapminstall-{info['version']}.msm" + msm_info['APMINJECT'] = info + return msm_info + + +@task( + iterable=['drivers'], + help={ + 'drivers': 'List of drivers to fetch (default: DDNPM, DDPROCMON, APMINJECT)', + 'release_version': 'Release version to fetch drivers from (default: nightly-a7)', + }, +) +def fetch_driver_msm(ctx, drivers=None, release_version=None): + """ + Fetch the driver merge modules (.msm) that are consumed by the Agent MSI. 
+ + Defaults to the versions provided in the @release_version section of release.json + """ + ALLOWED_DRIVERS = ['DDNPM', 'DDPROCMON', 'APMINJECT'] + if not release_version: + release_version = 'nightly-a7' + + msm_info = get_msm_info(ctx, release_version) + if not drivers: + # if user did not specify drivers, use the ones in the release.json + drivers = msm_info.keys() + + for driver in drivers: + driver = driver.upper() + if driver not in ALLOWED_DRIVERS: + raise Exit(f"Invalid driver: {driver}, choose from {ALLOWED_DRIVERS}") + + info = msm_info[driver] + url = info['url'] + shasum = info['shasum'] + path = os.path.join(AGENT_BIN_SOURCE_DIR, info['filename']) + + # download from url with requests package + checksum = hashlib.sha256() + with download_to_tempfile(url, checksum) as tmp_path: + # check sha256 + if checksum.hexdigest().lower() != shasum.lower(): + raise Exit(f"Checksum mismatch for {url}") + # move to final path + shutil.move(tmp_path, path) + + print(f"Updated {driver}") + print(f"\t-> Downloaded {url} to {path}") From e6c04b1c967beb68a2110cbb6aecd4b1ee154ef9 Mon Sep 17 00:00:00 2001 From: Daniel Lavie Date: Mon, 9 Sep 2024 18:29:55 +0300 Subject: [PATCH 084/128] USMON-1152: Remove Kafka monitor tests optimization (#29143) --- pkg/network/usm/kafka_monitor_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index bbcedaecb2be6..1c3c5e12702ca 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -529,19 +529,17 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b require.NoError(t, proxy.WaitForConnectionReady(unixPath)) cfg := getDefaultTestConfiguration(tls) - monitor := newKafkaMonitor(t, cfg) - if tls && cfg.EnableGoTLSSupport { - utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) - } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Cleanup(func() { for _, client := range tt.context.clients { client.Client.Close() } - cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager) }) - + monitor := newKafkaMonitor(t, cfg) + if tls && cfg.EnableGoTLSSupport { + utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) + } tt.testBody(t, &tt.context, monitor) }) } From 0dea3e9fd5871f9e399503d5b1b7ed344f84c5a0 Mon Sep 17 00:00:00 2001 From: shreyamalpani Date: Mon, 9 Sep 2024 12:02:41 -0400 Subject: [PATCH 085/128] [SVLS-5257] Add lambda file descriptor enhanced metrics (#29026) --- pkg/serverless/metrics/enhanced_metrics.go | 76 +++++++++++++++ .../metrics/enhanced_metrics_test.go | 58 +++++++++++ pkg/serverless/proc/proc.go | 85 ++++++++++++++++ pkg/serverless/proc/proc_test.go | 29 ++++++ .../invalid_malformed/31/limits | 17 ++++ .../invalid_malformed/9/limits | 17 ++++ .../file-descriptor/invalid_missing/31/limits | 16 ++++ .../file-descriptor/invalid_missing/9/limits | 16 ++++ .../testData/file-descriptor/valid/31/fd/1 | 0 .../testData/file-descriptor/valid/31/fd/2 | 0 .../testData/file-descriptor/valid/31/limits | 17 ++++ .../testData/file-descriptor/valid/9/fd/1 | 0 .../testData/file-descriptor/valid/9/fd/2 | 0 .../testData/file-descriptor/valid/9/fd/3 | 0 .../testData/file-descriptor/valid/9/limits | 17 ++++ pkg/serverless/serverless.go | 7 +- .../serverless/snapshots/error-csharp | 96 +++++++++++++++++++ .../serverless/snapshots/error-java | 96 
+++++++++++++++++++ .../serverless/snapshots/error-node | 96 +++++++++++++++++++ .../serverless/snapshots/error-proxy | 96 +++++++++++++++++++ .../serverless/snapshots/error-python | 96 +++++++++++++++++++ .../serverless/snapshots/metric-csharp | 96 +++++++++++++++++++ .../serverless/snapshots/metric-go | 96 +++++++++++++++++++ .../serverless/snapshots/metric-java | 96 +++++++++++++++++++ .../serverless/snapshots/metric-node | 96 +++++++++++++++++++ .../serverless/snapshots/metric-proxy | 96 +++++++++++++++++++ .../serverless/snapshots/metric-python | 96 +++++++++++++++++++ 27 files changed, 1409 insertions(+), 2 deletions(-) create mode 100644 pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits create mode 100644 pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits create mode 100644 pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits create mode 100644 pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/31/limits create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 create mode 100644 pkg/serverless/proc/testData/file-descriptor/valid/9/limits diff --git a/pkg/serverless/metrics/enhanced_metrics.go b/pkg/serverless/metrics/enhanced_metrics.go index c7dd8e62bcd22..3d81c71fce4a2 100644 --- a/pkg/serverless/metrics/enhanced_metrics.go +++ b/pkg/serverless/metrics/enhanced_metrics.go @@ -61,6 +61,8 @@ const ( totalNetworkMetric = "aws.lambda.enhanced.total_network" tmpUsedMetric = "aws.lambda.enhanced.tmp_used" tmpMaxMetric = "aws.lambda.enhanced.tmp_max" + fdMaxMetric = "aws.lambda.enhanced.fd_max" + fdUseMetric = "aws.lambda.enhanced.fd_use" enhancedMetricsEnvVar = "DD_ENHANCED_METRICS" // Bottlecap @@ -564,6 +566,80 @@ func SendTmpEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *S } +type generateFdEnhancedMetricsArgs struct { + FdMax float64 + FdUse float64 + Tags []string + Demux aggregator.Demultiplexer + Time float64 +} + +// generateFdEnhancedMetrics generates enhanced metrics for the maximum number of file descriptors available and in use +func generateFdEnhancedMetrics(args generateFdEnhancedMetricsArgs) { + args.Demux.AggregateSample(metrics.MetricSample{ + Name: fdMaxMetric, + Value: args.FdMax, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) + args.Demux.AggregateSample(metrics.MetricSample{ + Name: fdUseMetric, + Value: args.FdUse, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) +} + +func SendFdEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *ServerlessMetricAgent) { + if enhancedMetricsDisabled { + return + } + + fdMaxData, err := proc.GetFileDescriptorMaxData() + if err != nil { + log.Debug("Could not emit file descriptor enhanced metrics. %v", err) + return + } + + fdUseData, err := proc.GetFileDescriptorUseData() + if err != nil { + log.Debugf("Could not emit file descriptor enhanced metrics. 
%v", err) + return + } + + fdMax := fdMaxData.MaximumFileHandles + fdUse := fdUseData.UseFileHandles + + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + for { + select { + case _, open := <-sendMetrics: + if !open { + generateFdEnhancedMetrics(generateFdEnhancedMetricsArgs{ + FdMax: fdMax, + FdUse: fdUse, + Tags: tags, + Demux: metricAgent.Demux, + Time: float64(time.Now().UnixNano()) / float64(time.Second), + }) + return + } + case <-ticker.C: + fdUseData, err := proc.GetFileDescriptorUseData() + if err != nil { + log.Debugf("Could not emit file descriptor enhanced metrics. %v", err) + return + } + fdUse = math.Max(fdUse, fdUseData.UseFileHandles) + } + } +} + // incrementEnhancedMetric sends an enhanced metric with a value of 1 to the metrics channel func incrementEnhancedMetric(name string, tags []string, timestamp float64, demux aggregator.Demultiplexer, force bool) { // TODO - pass config here, instead of directly looking up var diff --git a/pkg/serverless/metrics/enhanced_metrics_test.go b/pkg/serverless/metrics/enhanced_metrics_test.go index 6281b1326180f..1c274229ba145 100644 --- a/pkg/serverless/metrics/enhanced_metrics_test.go +++ b/pkg/serverless/metrics/enhanced_metrics_test.go @@ -730,6 +730,64 @@ func TestSendTmpEnhancedMetricsDisabled(t *testing.T) { enhancedMetricsDisabled = false } +func TestSendFdEnhancedMetrics(t *testing.T) { + demux := createDemultiplexer(t) + tags := []string{"functionname:test-function"} + now := float64(time.Now().UnixNano()) / float64(time.Second) + args := generateFdEnhancedMetricsArgs{ + FdMax: 1024, + FdUse: 26, + Tags: tags, + Demux: demux, + Time: now, + } + go generateFdEnhancedMetrics(args) + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(3, 0, 100*time.Millisecond) + assert.Equal(t, []metrics.MetricSample{ + { + Name: fdMaxMetric, + Value: 1024, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + { + Name: fdUseMetric, + Value: 26, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + }, + generatedMetrics, + ) + assert.Len(t, timedMetrics, 0) +} + +func TestSendFdEnhancedMetricsDisabled(t *testing.T) { + var wg sync.WaitGroup + enhancedMetricsDisabled = true + demux := createDemultiplexer(t) + metricAgent := ServerlessMetricAgent{Demux: demux} + tags := []string{"functionname:test-function"} + + wg.Add(1) + go func() { + defer wg.Done() + SendFdEnhancedMetrics(make(chan bool), tags, &metricAgent) + }() + + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(1, 0, 100*time.Millisecond) + + assert.Len(t, generatedMetrics, 0) + assert.Len(t, timedMetrics, 0) + + wg.Wait() + enhancedMetricsDisabled = false +} + func TestSendFailoverReasonMetric(t *testing.T) { demux := createDemultiplexer(t) tags := []string{"reason:test-reason"} diff --git a/pkg/serverless/proc/proc.go b/pkg/serverless/proc/proc.go index 76b2af63f3337..b8e612de91938 100644 --- a/pkg/serverless/proc/proc.go +++ b/pkg/serverless/proc/proc.go @@ -7,10 +7,12 @@ package proc import ( + "bufio" "bytes" "errors" "fmt" "io" + "math" "os" "strconv" "strings" @@ -22,6 +24,9 @@ const ( ProcStatPath = "/proc/stat" ProcUptimePath = "/proc/uptime" ProcNetDevPath = "/proc/net/dev" + ProcPath = "/proc" + PidLimitsPathFormat = "/%d/limits" + PidFdPathFormat = "/%d/fd" lambdaNetworkInterface = "vinternal_1" ) @@ -196,3 +201,83 @@ func getNetworkData(path string) (*NetworkData, error) { } } + +type FileDescriptorMaxData struct { + MaximumFileHandles float64 +} + +// 
GetFileDescriptorMaxData returns the maximum limit of file descriptors the function can use +func GetFileDescriptorMaxData() (*FileDescriptorMaxData, error) { + return getFileDescriptorMaxData(ProcPath) +} + +func getFileDescriptorMaxData(path string) (*FileDescriptorMaxData, error) { + pids := getPidList(path) + fdMax := math.Inf(1) + + for _, pid := range pids { + limitsPath := fmt.Sprint(path + fmt.Sprintf(PidLimitsPathFormat, pid)) + file, err := os.Open(limitsPath) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "Max open files") { + fields := strings.Fields(line) + if len(fields) < 6 { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + fdMaxPidStr := fields[3] + fdMaxPid, err := strconv.Atoi(fdMaxPidStr) + if err != nil { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + fdMax = math.Min(float64(fdMax), float64(fdMaxPid)) + break + } + } + } + + if fdMax != math.Inf(1) { + return &FileDescriptorMaxData{ + MaximumFileHandles: fdMax, + }, nil + } + + return nil, fmt.Errorf("file descriptor max data not found") +} + +type FileDescriptorUseData struct { + UseFileHandles float64 +} + +// GetFileDescriptorUseData returns the maximum number of file descriptors the function has used at a time +func GetFileDescriptorUseData() (*FileDescriptorUseData, error) { + return getFileDescriptorUseData(ProcPath) +} + +func getFileDescriptorUseData(path string) (*FileDescriptorUseData, error) { + pids := getPidList(path) + fdUse := 0 + + for _, pid := range pids { + fdPath := fmt.Sprint(path + fmt.Sprintf(PidFdPathFormat, pid)) + files, err := os.ReadDir(fdPath) + if err != nil { + return nil, fmt.Errorf("file descriptor use data not found in file '%s'", fdPath) + } + fdUse += len(files) + } + + return &FileDescriptorUseData{ + UseFileHandles: float64(fdUse), + }, nil +} diff --git a/pkg/serverless/proc/proc_test.go b/pkg/serverless/proc/proc_test.go index 1f08cd1e0f9f5..6b445db35596c 100644 --- a/pkg/serverless/proc/proc_test.go +++ b/pkg/serverless/proc/proc_test.go @@ -143,3 +143,32 @@ func TestGetNetworkData(t *testing.T) { assert.NotNil(t, err) assert.Nil(t, networkData) } + +func TestGetFileDescriptorMaxData(t *testing.T) { + path := "./testData/file-descriptor/valid" + fileDescriptorMaxData, err := getFileDescriptorMaxData(path) + assert.Nil(t, err) + assert.Equal(t, float64(1024), fileDescriptorMaxData.MaximumFileHandles) + + path = "./testData/file-descriptor/invalid_malformed" + fileDescriptorMaxData, err = getFileDescriptorMaxData(path) + assert.NotNil(t, err) + assert.Nil(t, fileDescriptorMaxData) + + path = "./testData/file-descriptor/invalid_missing" + fileDescriptorMaxData, err = getFileDescriptorMaxData(path) + assert.NotNil(t, err) + assert.Nil(t, fileDescriptorMaxData) +} + +func TestGetFileDescriptorUseData(t *testing.T) { + path := "./testData/file-descriptor/valid" + fileDescriptorUseData, err := getFileDescriptorUseData(path) + assert.Nil(t, err) + assert.Equal(t, float64(5), fileDescriptorUseData.UseFileHandles) + + path = "./testData/file-descriptor/invalid_missing" + fileDescriptorUseData, err = getFileDescriptorUseData(path) + assert.NotNil(t, err) + assert.Nil(t, fileDescriptorUseData) +} diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits new file mode 100644 
index 0000000000000..45c06574a388c --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 1024 +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits new file mode 100644 index 0000000000000..3ad780c33f48d --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits new file mode 100644 index 0000000000000..34925a8f557f9 --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits @@ -0,0 +1,16 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits new file mode 100644 index 0000000000000..17e615740c934 --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits @@ -0,0 +1,16 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice 
priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 b/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 b/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/limits b/pkg/serverless/proc/testData/file-descriptor/valid/31/limits new file mode 100644 index 0000000000000..664f04c884fad --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/valid/31/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 1024 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 b/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/limits b/pkg/serverless/proc/testData/file-descriptor/valid/9/limits new file mode 100644 index 0000000000000..664f04c884fad --- /dev/null +++ b/pkg/serverless/proc/testData/file-descriptor/valid/9/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 1024 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/serverless.go b/pkg/serverless/serverless.go index 992ca51d55bda..2528e71d81e7a 100644 --- a/pkg/serverless/serverless.go +++ b/pkg/serverless/serverless.go @@ -161,6 +161,8 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, cpuOffsetData, cpuOffsetErr := proc.GetCPUData() uptimeOffset, uptimeOffsetErr := proc.GetUptime() networkOffsetData, networkOffsetErr := proc.GetNetworkData() + sendFdMetrics := make(chan bool) + go metrics.SendFdEnhancedMetrics(sendFdMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) sendTmpMetrics := make(chan bool) go 
metrics.SendTmpEnhancedMetrics(sendTmpMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) @@ -179,16 +181,17 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, case <-doneChannel: break } - sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics) + sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics, sendFdMetrics) } -func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool) { +func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool, sendFdMetrics chan bool) { if daemon.MetricAgent == nil { log.Debug("Could not send system enhanced metrics") return } close(sendTmpMetrics) + close(sendFdMetrics) if emitCPUMetrics { metrics.SendCPUEnhancedMetrics(cpuOffsetData, uptimeOffset, daemon.ExtraTags.Tags, daemon.MetricAgent.Demux) diff --git a/test/integration/serverless/snapshots/error-csharp b/test/integration/serverless/snapshots/error-csharp index c030934e86cae..b96bb73eee4ce 100644 --- a/test/integration/serverless/snapshots/error-csharp +++ b/test/integration/serverless/snapshots/error-csharp @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + 
"resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-java b/test/integration/serverless/snapshots/error-java index 9aaf96d543760..754e7f0733ffd 100644 --- a/test/integration/serverless/snapshots/error-java +++ b/test/integration/serverless/snapshots/error-java @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + 
"architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-node b/test/integration/serverless/snapshots/error-node index 58d2a83a13bd6..97b02cd4fd0dd 100644 --- a/test/integration/serverless/snapshots/error-node +++ b/test/integration/serverless/snapshots/error-node @@ -537,6 +537,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + 
"resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-proxy b/test/integration/serverless/snapshots/error-proxy index caf6b1aeedfb3..58e810c64e07a 100644 --- a/test/integration/serverless/snapshots/error-proxy +++ b/test/integration/serverless/snapshots/error-proxy @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-python 
b/test/integration/serverless/snapshots/error-python index e2c1e888a085c..e7bd220d86bf5 100644 --- a/test/integration/serverless/snapshots/error-python +++ b/test/integration/serverless/snapshots/error-python @@ -539,6 +539,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-csharp b/test/integration/serverless/snapshots/metric-csharp index e8212c0f4a253..87d214181a82a 100644 --- a/test/integration/serverless/snapshots/metric-csharp +++ b/test/integration/serverless/snapshots/metric-csharp @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": 
"aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-go b/test/integration/serverless/snapshots/metric-go index 24046cf6d6953..950cacf5bd7b6 100644 --- a/test/integration/serverless/snapshots/metric-go +++ b/test/integration/serverless/snapshots/metric-go @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + 
"functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-java b/test/integration/serverless/snapshots/metric-java index bbb3debe64b28..168e525ffc408 100644 --- a/test/integration/serverless/snapshots/metric-java +++ b/test/integration/serverless/snapshots/metric-java @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, 
+ "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-node b/test/integration/serverless/snapshots/metric-node index 8bc4b04fa27e1..c74d07d228aea 100644 --- a/test/integration/serverless/snapshots/metric-node +++ b/test/integration/serverless/snapshots/metric-node @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + 
"functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-proxy b/test/integration/serverless/snapshots/metric-proxy index 260310cc549e9..9156964619d76 100644 --- a/test/integration/serverless/snapshots/metric-proxy +++ b/test/integration/serverless/snapshots/metric-proxy @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { 
+ "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-python b/test/integration/serverless/snapshots/metric-python index f2c2492f9ae59..65d8ec77aa3f6 100644 --- a/test/integration/serverless/snapshots/metric-python +++ b/test/integration/serverless/snapshots/metric-python @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + 
"function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], From 930842ffb969255207a33c4e9ea37165903e1e95 Mon Sep 17 00:00:00 2001 From: Gustavo Caso Date: Mon, 9 Sep 2024 18:18:27 +0200 Subject: [PATCH 086/128] replace bool function argument with an early return (#29135) --- comp/forwarder/defaultforwarder/forwarder.go | 20 ++++++++++--------- .../forwarders/forwardersimpl/forwarders.go | 2 +- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go index 9769bbaeb2ab0..64cc62cd636df 100644 --- a/comp/forwarder/defaultforwarder/forwarder.go +++ b/comp/forwarder/defaultforwarder/forwarder.go @@ -33,16 +33,23 @@ type provides struct { } func newForwarder(dep dependencies) provides { + if dep.Params.useNoopForwarder { + return provides{ + Comp: NoopForwarder{}, + } + } + options := createOptions(dep.Params, dep.Config, dep.Log) - return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options, dep.Params.useNoopForwarder) + + return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options) } func createOptions(params Params, config config.Component, log log.Component) *Options { var options *Options + keysPerDomain := getMultipleEndpoints(config, log) if !params.withResolver { - options = NewOptions(config, log, getMultipleEndpoints(config, log)) + options = NewOptions(config, log, keysPerDomain) } else { - keysPerDomain := getMultipleEndpoints(config, log) options = NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(keysPerDomain)) } // Override the DisableAPIKeyChecking only if WithFeatures was called @@ -66,12 +73,7 @@ func getMultipleEndpoints(config config.Component, log log.Component) map[string // NewForwarder returns a new forwarder component. 
// //nolint:revive -func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options, useNoopForwarder bool) provides { - if useNoopForwarder { - return provides{ - Comp: NoopForwarder{}, - } - } +func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options) provides { forwarder := NewDefaultForwarder(config, log, options) lc.Append(fx.Hook{ diff --git a/comp/process/forwarders/forwardersimpl/forwarders.go b/comp/process/forwarders/forwardersimpl/forwarders.go index 2e81a8b290d8f..97c22f21f1480 100644 --- a/comp/process/forwarders/forwardersimpl/forwarders.go +++ b/comp/process/forwarders/forwardersimpl/forwarders.go @@ -73,7 +73,7 @@ func newForwarders(deps dependencies) (forwarders.Component, error) { } func createForwarder(deps dependencies, options *defaultforwarder.Options) defaultforwarder.Component { - return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options, false).Comp + return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options).Comp } func createParams(config config.Component, log log.Component, queueBytes int, endpoints []apicfg.Endpoint) *defaultforwarder.Options { From 9d1169042aaeb82bdd14e049a399220592765600 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Mon, 9 Sep 2024 18:30:23 +0200 Subject: [PATCH 087/128] feat(ci): shorten ssm names (#29081) --- .gitlab-ci.yml | 98 +++++++++---------- .gitlab/.pre/cancel-prev-pipelines.yml | 2 +- .gitlab/.pre/test_gitlab_configuration.yml | 4 +- .gitlab/choco_deploy/choco_deploy.yml | 2 +- .../container_publish_job_templates.yml | 2 +- .gitlab/common/shared.yml | 22 ++--- .gitlab/container_build/docker_linux.yml | 4 +- .gitlab/container_build/docker_windows.yml | 4 +- .gitlab/container_build/fakeintake.yml | 4 +- .gitlab/deploy_packages/winget.yml | 2 +- .gitlab/e2e/e2e.yml | 8 +- .gitlab/e2e_install_packages/common.yml | 6 +- .gitlab/e2e_k8s/e2e_k8s.yml | 12 +-- .../functional_test/regression_detector.yml | 10 +- .../install_script_testing.yml | 2 +- .gitlab/integration_test/windows.yml | 2 +- .../internal_image_deploy.yml | 8 +- .../internal_kubernetes_deploy.yml | 2 +- .../rc_kubernetes_deploy.yml | 2 +- .gitlab/kernel_matrix_testing/common.yml | 26 ++--- .../kernel_matrix_testing/security_agent.yml | 2 +- .../kernel_matrix_testing/system_probe.yml | 10 +- .gitlab/kitchen_deploy/kitchen_deploy.yml | 8 +- .gitlab/maintenance_jobs/docker.yml | 4 +- .gitlab/maintenance_jobs/kitchen.yml | 8 +- .gitlab/notify/notify.yml | 22 ++--- .gitlab/package_build/installer.yml | 2 +- .gitlab/package_build/windows.yml | 4 +- .gitlab/packaging/rpm.yml | 8 +- .gitlab/pkg_metrics/pkg_metrics.yml | 2 +- .gitlab/post_rc_build/post_rc_tasks.yml | 2 +- .gitlab/setup/setup.yml | 16 +-- .gitlab/source_test/golang_deps_diff.yml | 4 +- .gitlab/source_test/linux.yml | 4 +- .gitlab/source_test/windows.yml | 6 +- .gitlab/trigger_release/trigger_release.yml | 2 +- tasks/libs/common/omnibus.py | 12 +-- tasks/pipeline.py | 2 +- tasks/unit_tests/linter_tests.py | 4 +- tasks/unit_tests/omnibus_tests.py | 2 +- tasks/unit_tests/testdata/fake_gitlab-ci.yml | 12 +-- tasks/winbuildscripts/unittests.ps1 | 6 +- test/kitchen/tasks/clean.sh | 8 +- test/kitchen/tasks/run-test-kitchen.sh | 10 +- test/kitchen/tasks/show-strays.sh | 8 +- tools/ci/docker-login.ps1 | 4 +- tools/ci/junit_upload.sh | 4 +- 47 files changed, 199 insertions(+), 199 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml 
index 3c16b1839c04c..99a4d4ff8e646 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -196,59 +196,59 @@ variables: # List of parameters retrieved from AWS SSM # They must be defined as environment variables in the GitLab CI/CD settings, to ease rotation if needed - AGENT_QA_PROFILE_SSM_NAME: ci.datadog-agent.agent-qa-profile # agent-devx-infra - API_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra - API_KEY_DDDEV_SSM_NAME: ci.datadog-agent.datadog_api_key # agent-devx-infra - APP_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra - CHANGELOG_COMMIT_SHA_SSM_NAME: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra - CHOCOLATEY_API_KEY_SSM_NAME: ci.datadog-agent.chocolatey_api_key # windows-agent - CODECOV_TOKEN_SSM_NAME: ci.datadog-agent.codecov_token # agent-devx-infra - DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery - DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery - DOCKER_REGISTRY_LOGIN_SSM_KEY: ci.datadog-agent.docker_hub_login # container-integrations - DOCKER_REGISTRY_PWD_SSM_KEY: ci.datadog-agent.docker_hub_pwd # container-integrations - E2E_TESTS_API_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops - E2E_TESTS_APP_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops - E2E_TESTS_RC_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops + AGENT_QA_PROFILE: ci.datadog-agent.agent-qa-profile # agent-devx-infra + API_KEY_ORG2: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra + API_KEY_DDDEV: ci.datadog-agent.datadog_api_key # agent-devx-infra + APP_KEY_ORG2: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra + CHANGELOG_COMMIT_SHA: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra + CHOCOLATEY_API_KEY: ci.datadog-agent.chocolatey_api_key # windows-agent + CODECOV_TOKEN: ci.datadog-agent.codecov_token # agent-devx-infra + DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery + DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery + DOCKER_REGISTRY_LOGIN: ci.datadog-agent.docker_hub_login # container-integrations + DOCKER_REGISTRY_PWD: ci.datadog-agent.docker_hub_pwd # container-integrations + E2E_TESTS_API_KEY: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops + E2E_TESTS_APP_KEY: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops + E2E_TESTS_RC_KEY: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops E2E_TESTS_AZURE_CLIENT_ID: ci.datadog-agent.e2e_tests_azure_client_id # agent-devx-loops E2E_TESTS_AZURE_CLIENT_SECRET: ci.datadog-agent.e2e_tests_azure_client_secret # agent-devx-loops E2E_TESTS_AZURE_TENANT_ID: ci.datadog-agent.e2e_tests_azure_tenant_id # agent-devx-loops E2E_TESTS_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.e2e_tests_azure_subscription_id # agent-devx-loops - KITCHEN_EC2_SSH_KEY_SSM_NAME: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops - KITCHEN_AZURE_CLIENT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops - KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops - KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops - KITCHEN_AZURE_TENANT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops - GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME: pr-commenter.github_app_key # agent-devx-infra - 
GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME: pr-commenter.github_integration_id # agent-devx-infra - GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME: pr-commenter.github_installation_id # agent-devx-infra - GITLAB_SCHEDULER_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_pipelines_scheduler_token # ci-cd - GITLAB_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_read_api_token # ci-cd - GITLAB_FULL_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_full_api_token # ci-cd - INSTALL_SCRIPT_API_KEY_SSM_NAME: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery - JIRA_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.jira_read_api_token # agent-devx-infra - AGENT_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.platform-github-app-id # agent-devx-infra - AGENT_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra - AGENT_GITHUB_KEY_SSM_NAME: ci.datadog-agent.platform-github-app-key # agent-devx-infra - MACOS_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.macos_github_app_id # agent-devx-infra - MACOS_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.macos_github_installation_id # agent-devx-infra - MACOS_GITHUB_KEY_SSM_NAME: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra - MACOS_GITHUB_APP_ID_2_SSM_NAME: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra - MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra - MACOS_GITHUB_KEY_2_SSM_NAME: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra - RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery - RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery - SLACK_AGENT_CI_TOKEN_SSM_NAME: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra - SMP_ACCOUNT_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance - SMP_AGENT_TEAM_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance - SMP_API_SSM_NAME: ci.datadog-agent.single-machine-performance-api # single-machine-performance - SMP_BOT_ACCESS_KEY_SSM_NAME: ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance - SMP_BOT_ACCESS_KEY_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance - SSH_KEY_SSM_NAME: ci.datadog-agent.ssh_key # system-probe - SSH_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_key_rsa # agent-devx-loops - SSH_PUBLIC_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_public_key_rsa # agent-devx-loops - VCPKG_BLOB_SAS_URL_SSM_NAME: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent - WINGET_PAT_SSM_NAME: ci.datadog-agent.winget_pat # windows-agent + KITCHEN_EC2_SSH_KEY: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops + KITCHEN_AZURE_CLIENT_ID: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops + KITCHEN_AZURE_CLIENT_SECRET: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops + KITCHEN_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops + KITCHEN_AZURE_TENANT_ID: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops + GITHUB_PR_COMMENTER_APP_KEY: pr-commenter.github_app_key # agent-devx-infra + GITHUB_PR_COMMENTER_INTEGRATION_ID: pr-commenter.github_integration_id # agent-devx-infra + GITHUB_PR_COMMENTER_INSTALLATION_ID: pr-commenter.github_installation_id # agent-devx-infra + GITLAB_SCHEDULER_TOKEN: ci.datadog-agent.gitlab_pipelines_scheduler_token 
# ci-cd + GITLAB_READ_API_TOKEN: ci.datadog-agent.gitlab_read_api_token # ci-cd + GITLAB_FULL_API_TOKEN: ci.datadog-agent.gitlab_full_api_token # ci-cd + INSTALL_SCRIPT_API_KEY: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery + JIRA_READ_API_TOKEN: ci.datadog-agent.jira_read_api_token # agent-devx-infra + AGENT_GITHUB_APP_ID: ci.datadog-agent.platform-github-app-id # agent-devx-infra + AGENT_GITHUB_INSTALLATION_ID: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra + AGENT_GITHUB_KEY: ci.datadog-agent.platform-github-app-key # agent-devx-infra + MACOS_GITHUB_APP_ID: ci.datadog-agent.macos_github_app_id # agent-devx-infra + MACOS_GITHUB_INSTALLATION_ID: ci.datadog-agent.macos_github_installation_id # agent-devx-infra + MACOS_GITHUB_KEY: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra + MACOS_GITHUB_APP_ID_2: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra + MACOS_GITHUB_INSTALLATION_ID_2: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra + MACOS_GITHUB_KEY_2: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra + RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery + RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery + SLACK_AGENT_CI_TOKEN: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra + SMP_ACCOUNT_ID: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance + SMP_AGENT_TEAM_ID: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance + SMP_API: ci.datadog-agent.single-machine-performance-api # single-machine-performance + SMP_BOT_ACCESS_KEY: ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance + SMP_BOT_ACCESS_KEY_ID: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance + SSH_KEY: ci.datadog-agent.ssh_key # system-probe + SSH_KEY_RSA: ci.datadog-agent.ssh_key_rsa # agent-devx-loops + SSH_PUBLIC_KEY_RSA: ci.datadog-agent.ssh_public_key_rsa # agent-devx-loops + VCPKG_BLOB_SAS_URL: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent + WINGET_PAT: ci.datadog-agent.winget_pat # windows-agent DD_PKG_VERSION: "latest" diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml index 2bb87278b70ec..c743ce4b73df7 100644 --- a/.gitlab/.pre/cancel-prev-pipelines.yml +++ b/.gitlab/.pre/cancel-prev-pipelines.yml @@ -14,5 +14,5 @@ cancel-prev-pipelines: when: never - when: on_success script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - inv pipeline.auto-cancel-previous-pipelines diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml index 300d4f1a0999d..85a4dbecfe0cd 100644 --- a/.gitlab/.pre/test_gitlab_configuration.yml +++ b/.gitlab/.pre/test_gitlab_configuration.yml @@ -5,7 +5,7 @@ test_gitlab_configuration: rules: - !reference [.on_gitlab_changes] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) - inv -e linter.gitlab-ci - inv -e linter.job-change-path - inv -e linter.gitlab-change-paths @@ -19,7 +19,7 @@ test_gitlab_compare_to: - !reference [.on_gitlab_changes] script: - source 
/root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) - !reference [.setup_agent_github_app] - pip install -r tasks/requirements.txt - inv pipeline.compare-to-itself diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index 9a8bae71f9755..f8ace52c393cc 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -11,7 +11,7 @@ publish_choco_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY_SSM_NAME" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $chocolateyApiKey=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml index 76b44efeba6fc..c59618528f045 100644 --- a/.gitlab/common/container_publish_job_templates.yml +++ b/.gitlab/common/container_publish_job_templates.yml @@ -13,7 +13,7 @@ IMG_VARIABLES: "" IMG_SIGNING: "" script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - | if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then export ECR_RELEASE_SUFFIX="-nightly" diff --git a/.gitlab/common/shared.yml b/.gitlab/common/shared.yml index 1df106e9b4c08..d85d04434062b 100644 --- a/.gitlab/common/shared.yml +++ b/.gitlab/common/shared.yml @@ -21,30 +21,30 @@ .setup_deb_signing_key: &setup_deb_signing_key - set +x - - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME) + - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY) - printf -- "${DEB_GPG_KEY}" | gpg --import --batch - - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME) + - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE) .setup_macos_github_app: # GitHub App rate-limits are per-app. # This balances the requests made to GitHub between the two apps we have set up. 
- | if [[ "$(( RANDOM % 2 ))" == "1" ]]; then - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID) echo "Using GitHub App instance 1" else - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2) echo "Using GitHub App instance 2" fi .setup_agent_github_app: - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_KEY_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_APP_ID_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_INSTALLATION_ID_SSM_NAME) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_KEY) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_APP_ID) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_INSTALLATION_ID) - echo "Using agent GitHub App" # Install `dd-pkg` and lint packages produced by Omnibus, supports only deb and rpm packages diff --git a/.gitlab/container_build/docker_linux.yml b/.gitlab/container_build/docker_linux.yml index d12b6894b57de..bf06dc495cc04 100644 --- a/.gitlab/container_build/docker_linux.yml +++ b/.gitlab/container_build/docker_linux.yml @@ -13,8 +13,8 @@ fi - TARGET_TAG=${IMAGE}${ECR_RELEASE_SUFFIX}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}$TAG_SUFFIX-$ARCH # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" # Build image, use target none label to avoid replication - docker buildx build --no-cache --push --pull --platform linux/$ARCH --build-arg CIBUILD=true --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} $BUILD_ARG --build-arg DD_GIT_REPOSITORY_URL=https://github.com/DataDog/datadog-agent --build-arg DD_GIT_COMMIT_SHA=${CI_COMMIT_SHA} --file 
$BUILD_CONTEXT/Dockerfile --tag ${TARGET_TAG} --label "org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label "org.opencontainers.image.authors=Datadog " --label "org.opencontainers.image.source=https://github.com/DataDog/datadog-agent" --label "org.opencontainers.image.version=$(inv agent.version)" --label "org.opencontainers.image.revision=${CI_COMMIT_SHA}" --label "org.opencontainers.image.vendor=Datadog, Inc." --label "target=none" $BUILD_CONTEXT # Squash image diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml index 6d7a365b22ee4..af2a6a84bfde6 100644 --- a/.gitlab/container_build/docker_windows.yml +++ b/.gitlab/container_build/docker_windows.yml @@ -29,8 +29,8 @@ -e SIGN_WINDOWS_DD_WCS=true -e CI_PIPELINE_ID=${CI_PIPELINE_ID} -e CI_PROJECT_NAME=${CI_PROJECT_NAME} - -e DOCKER_REGISTRY_LOGIN_SSM_KEY=${DOCKER_REGISTRY_LOGIN_SSM_KEY} - -e DOCKER_REGISTRY_PWD_SSM_KEY=${DOCKER_REGISTRY_PWD_SSM_KEY} + -e DOCKER_REGISTRY_LOGIN=${DOCKER_REGISTRY_LOGIN} + -e DOCKER_REGISTRY_PWD=${DOCKER_REGISTRY_PWD} -v "$(Get-Location):C:\mnt" -v \\.\pipe\docker_engine:\\.\pipe\docker_engine 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_${Env:VARIANT}_x64${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} powershell diff --git a/.gitlab/container_build/fakeintake.yml b/.gitlab/container_build/fakeintake.yml index 334d6e73e78ca..da3180eb025b9 100644 --- a/.gitlab/container_build/fakeintake.yml +++ b/.gitlab/container_build/fakeintake.yml @@ -15,7 +15,7 @@ docker_build_fakeintake: BUILD_CONTEXT: . script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" - docker buildx build --push --pull --platform ${PLATFORMS} --file ${DOCKERFILE} --tag ${TARGET} $BUILD_CONTEXT retry: 2 diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index a29aa20668ced..11e4945731ac6 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -11,7 +11,7 @@ publish_winget_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:WINGET_PAT_SSM_NAME" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:WINGET_PAT" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $wingetPat=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 785ff989ce1d1..e62ca3fd3f58b 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -11,11 +11,11 @@ - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - 
$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_PUBLIC_KEY_RSA_SSM_NAME > $E2E_PUBLIC_KEY_PATH - - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_RSA_SSM_NAME > $E2E_PRIVATE_KEY_PATH + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" # Setup Azure credentials. https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config @@ -468,7 +468,7 @@ generate-flakes-finder-pipeline: - qa_agent tags: ["arch:amd64"] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) - inv -e testwasher.generate-flake-finder-pipeline artifacts: paths: diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml index 90777eb5ec269..934bb75a67f58 100644 --- a/.gitlab/e2e_install_packages/common.yml +++ b/.gitlab/e2e_install_packages/common.yml @@ -33,7 +33,7 @@ - START_MAJOR_VERSION: [5, 6] END_MAJOR_VERSION: [6] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_script_upgrade7: @@ -47,7 +47,7 @@ - START_MAJOR_VERSION: [5, 6, 7] END_MAJOR_VERSION: [7] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME ) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY ) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_rpm: @@ -57,5 +57,5 @@ TEAM: agent-delivery EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} diff --git a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml index db52467098c4a..98ee5289466cc 100644 --- a/.gitlab/e2e_k8s/e2e_k8s.yml +++ b/.gitlab/e2e_k8s/e2e_k8s.yml @@ -11,16 +11,16 @@ variables: LANG: C.UTF-8 before_script: - - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_DDDEV_SSM_NAME) + - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) + - export 
DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_DDDEV) .k8s-e2e-cws-cspm-init: - set +x - export DATADOG_AGENT_SITE=datadoghq.com - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_API_KEY_SSM_NAME) - - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_APP_KEY_SSM_NAME) - - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_RC_KEY_SSM_NAME) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_API_KEY) + - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_APP_KEY) + - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_RC_KEY) .k8s_e2e_template_needs_dev: extends: .k8s_e2e_template diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 3819ff4626d4d..f48e6651b695b 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -42,12 +42,12 @@ single-machine-performance-regression_detector: - echo "Merge base is ${SMP_MERGE_BASE}" # Setup AWS credentials for single-machine-performance AWS account - AWS_NAMED_PROFILE="single-machine-performance" - - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_ACCOUNT_ID_SSM_NAME) + - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_ACCOUNT_ID) - SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com - - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_AGENT_TEAM_ID_SSM_NAME) - - SMP_API=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_API_SSM_NAME) - - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_ID_SSM_NAME) --profile ${AWS_NAMED_PROFILE} - - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_SSM_NAME) --profile ${AWS_NAMED_PROFILE} + - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_AGENT_TEAM_ID) + - SMP_API=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_API) + - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_ID) --profile ${AWS_NAMED_PROFILE} + - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY) --profile ${AWS_NAMED_PROFILE} - aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE} # Download smp binary and prepare it for use - aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml index cafe094eaa5f2..7e649bf581d2e 100644 --- a/.gitlab/install_script_testing/install_script_testing.yml +++ b/.gitlab/install_script_testing/install_script_testing.yml @@ -5,7 +5,7 @@ test_install_script: tags: ["arch:amd64"] script: - set +x - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET - export TESTING_YUM_URL=$RPM_TESTING_S3_BUCKET - export 
TEST_PIPELINE_ID=$CI_PIPELINE_ID diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index b13c0a2d9f18e..99cbd9110c6bc 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -8,7 +8,7 @@ tags: ["runner:windows-docker", "windowsversion:1809"] before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:VCPKG_BLOB_SAS_URL_SSM_NAME" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:VCPKG_BLOB_SAS_URL" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $vcpkgBlobSaSUrl=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index bdf209e744f58..ff11b30f0083b 100644 --- a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -22,7 +22,7 @@ docker_trigger_internal: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -68,7 +68,7 @@ docker_trigger_internal-ot: RELEASE_STAGING: "true" script: - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -114,7 +114,7 @@ docker_trigger_cluster_agent_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -160,7 +160,7 @@ docker_trigger_cws_instrumentation_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 87bae387f610c..a4b3450af0d3a 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -36,7 +36,7 @@ internal_kubernetes_deploy_experimental: EXPLICIT_WORKFLOWS: 
"//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish" BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml index 44b48d2829115..20855d891344a 100644 --- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @@ -22,7 +22,7 @@ rc_kubernetes_deploy: EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc" AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index a75a9b9b8b04e..fa8137de57d90 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -29,7 +29,7 @@ .write_ssh_key_file: - touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_SSM_NAME > $AWS_EC2_SSH_KEY_FILE + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE # Without the newline ssh silently fails and moves on to try other auth methods - echo "" >> $AWS_EC2_SSH_KEY_FILE - chmod 600 $AWS_EC2_SSH_KEY_FILE @@ -47,7 +47,7 @@ .kmt_new_profile: - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci .define_if_collect_complexity: @@ -60,7 +60,7 @@ - echo "COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY}" .collect_outcomes_kmt: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - export MICRO_VM_IP=$(jq --exit-status --arg TAG $TAG --arg ARCH $ARCH --arg TEST_SET $TEST_SET -r '.[$ARCH].microvms | map(select(."vmset-tags"| index($TEST_SET))) | map(select(.tag==$TAG)) | .[].ip' $CI_PROJECT_DIR/stack.output) # Collect setup-ddvm systemd service logs - mkdir -p $CI_PROJECT_DIR/logs @@ -114,7 +114,7 @@ scp $DD_AGENT_TESTING_DIR/kmt-dockers-$ARCH.tar.gz metal_instance:/opt/kernel-version-testing fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] 
variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key @@ -143,7 +143,7 @@ KUBERNETES_MEMORY_LIMIT: "16Gi" VMCONFIG_FILE: "${CI_PROJECT_DIR}/vmconfig-${CI_PIPELINE_ID}-${ARCH}.json" before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.retrieve_linux_go_deps] - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] @@ -157,7 +157,7 @@ - jq "." $CI_PROJECT_DIR/stack.output - pulumi logout after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - export AWS_PROFILE=agent-qa-ci - !reference [.shared_filters_and_queries] - mkdir -p $CI_PROJECT_DIR/libvirt/log/$ARCH $CI_PROJECT_DIR/libvirt/xml $CI_PROJECT_DIR/libvirt/qemu $CI_PROJECT_DIR/libvirt/dnsmasq @@ -182,7 +182,7 @@ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] before_script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - !reference [.kmt_new_profile] script: - !reference [.shared_filters_and_queries] @@ -199,7 +199,7 @@ aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] # Manual cleanup jobs, these will be used to cleanup the instances after the tests @@ -228,7 +228,7 @@ RETRY: 2 EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] - echo "CI_JOB_URL=${CI_JOB_URL}" >> $DD_AGENT_TESTING_DIR/job_env.txt @@ -315,9 +315,9 @@ notify_ebpf_complexity_changes: - python3 -m pip install tabulate # Required for printing the tables - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) + export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) script: - inv -e ebpf.generate-complexity-summary-for-pr diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml 
b/.gitlab/kernel_matrix_testing/security_agent.yml index 8df130eb83782..5f21a58ad63a0 100644 --- a/.gitlab/kernel_matrix_testing/security_agent.yml +++ b/.gitlab/kernel_matrix_testing/security_agent.yml @@ -72,7 +72,7 @@ kmt_setup_env_secagent_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml index f871f2aca10a1..b0d58154e22ac 100644 --- a/.gitlab/kernel_matrix_testing/system_probe.yml +++ b/.gitlab/kernel_matrix_testing/system_probe.yml @@ -28,13 +28,13 @@ upload_dependencies_sysprobe_arm64: stage: kernel_matrix_testing_prepare script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" # Pull base images - mkdir $KMT_DOCKERS - inv -e system-probe.save-test-dockers --use-crane --output-dir $KMT_DOCKERS --arch $ARCH after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] artifacts: expire_in: 1 day @@ -81,7 +81,7 @@ pull_test_dockers_arm64: - !reference [.setup_ssh_config] - scp $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/$ARCHIVE_NAME metal_instance:/opt/kernel-version-testing/ after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: DEPENDENCIES: $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/btfs @@ -160,7 +160,7 @@ kmt_setup_env_sysprobe_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 1ee5a881c07d1..774f459e29921 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -3,13 +3,13 @@ # Contains jobs which deploy Agent package to testing repsoitories that are used in kitchen tests. 
.setup_rpm_signing_key: &setup_rpm_signing_key - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) .setup_apt_signing_key: &setup_apt_signing_key - - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME) - - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME) + - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY) + - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE) - printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml index 43f8bded7ae2f..899f74156feac 100644 --- a/.gitlab/maintenance_jobs/docker.yml +++ b/.gitlab/maintenance_jobs/docker.yml @@ -60,8 +60,8 @@ delete_docker_tag: TAG: "" # tag name, for example "6.9.0" ORGANIZATION: "datadog" before_script: - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - PASS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY) + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) + - PASS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD) - python3 -m pip install -r requirements.txt - | export DOCKER_TOKEN=`curl -s -H "Content-Type: application/json" -X POST -d '{"username": "'$DOCKER_REGISTRY_LOGIN'", "password": "'$PASS'"}' https://hub.docker.com/v2/users/login/ | python -c 'import sys, json; print(json.load(sys.stdin)["token"].strip())'` diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml index b37355076f395..69021c398dff2 100644 --- a/.gitlab/maintenance_jobs/kitchen.yml +++ b/.gitlab/maintenance_jobs/kitchen.yml @@ -26,10 +26,10 @@ periodic_kitchen_cleanup_azure: # the job to be run one at a time. 
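
The delete_docker_tag hunk above keeps the same Docker Hub login flow with the renamed variables: POST the registry credentials to hub.docker.com and read the token field out of the JSON reply. Below is a hedged Go sketch of that curl-plus-python pipeline; the function name and the masked output are ours.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// dockerHubToken mirrors the DOCKER_TOKEN line: it logs in to Docker Hub
// and returns the "token" field from the JSON response.
func dockerHubToken(login, password string) (string, error) {
	body, _ := json.Marshal(map[string]string{"username": login, "password": password})
	resp, err := http.Post("https://hub.docker.com/v2/users/login/", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var out struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.Token, nil
}

func main() {
	tok, err := dockerHubToken(os.Getenv("DOCKER_REGISTRY_LOGIN"), os.Getenv("PASS"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("token length:", len(tok))
}
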
resource_group: azure_cleanup script: - - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME` - - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME` - - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME` - - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME` + - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` + - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID` + - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET` + - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID` # Remove kitchen resources for all existing test suite prefixes - RESOURCE_GROUP_PREFIX=kitchen-chef python3 /deploy_scripts/cleanup_azure.py - RESOURCE_GROUP_PREFIX=kitchen-win python3 /deploy_scripts/cleanup_azure.py diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index efa437a64cdde..48831192e5307 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -25,8 +25,8 @@ notify: resource_group: notification timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt - | # Do not send notifications if this is a child pipeline of another repo @@ -53,8 +53,8 @@ send_pipeline_stats: when: always dependencies: [] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - invoke -e notify.send-stats notify_github: @@ -110,10 +110,10 @@ notify_gitlab_ci_changes: - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) + export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh 
$GITLAB_FULL_API_TOKEN) - inv -e notify.gitlab-ci-diff --pr-comment .failure_summary_job: @@ -125,9 +125,9 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above .failure_summary_setup: - - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN_SSM_NAME) - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt # Upload failure summary data to S3 at the end of each main pipeline diff --git a/.gitlab/package_build/installer.yml b/.gitlab/package_build/installer.yml index 98232f8910e08..67b3ad894369d 100644 --- a/.gitlab/package_build/installer.yml +++ b/.gitlab/package_build/installer.yml @@ -183,7 +183,7 @@ windows-installer-amd64: -e SIGN_WINDOWS_DD_WCS=true -e S3_OMNIBUS_CACHE_BUCKET="$S3_OMNIBUS_CACHE_BUCKET" -e USE_S3_CACHING="$USE_S3_CACHING" - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildinstaller.bat after_script: diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml index 2d45a7b0ceecd..b54b3c33eb677 100644 --- a/.gitlab/package_build/windows.yml +++ b/.gitlab/package_build/windows.yml @@ -36,7 +36,7 @@ -e GO_VERSION_CHECK="true" -e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG} -e PIP_INDEX_URL=${PIP_INDEX_URL} - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildwin.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } @@ -123,7 +123,7 @@ windows_zip_agent_binaries_x64-a7: -e USE_S3_CACHING="$USE_S3_CACHING" -e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG} -e PIP_INDEX_URL=${PIP_INDEX_URL} - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildwin.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index 104e66cfa40c1..1e710de458837 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -8,9 +8,9 @@ script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export 
RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --target-project=${DD_PROJECT} ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -137,9 +137,9 @@ installer_suse_rpm-arm64: script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) - inv -e omnibus.build --release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --flavor=iot ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 9ea6639e0780e..38e6ce3b23c70 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -57,7 +57,7 @@ send_pkg_size: optional: true script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) # Allow failures: some packages are not always built, and therefore stats cannot be sent for them - set +e diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml index e00f0e8599bdb..f39c490c1f9bb 100644 --- a/.gitlab/post_rc_build/post_rc_tasks.yml +++ b/.gitlab/post_rc_build/post_rc_tasks.yml @@ -11,7 +11,7 @@ update_rc_build_links: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN_SSM_NAME) + - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN) - export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com - python3 -m pip install -r tasks/requirements_release_tasks.txt - PATCH=$(echo "$CI_COMMIT_REF_NAME" | cut -d'.' 
-f3 | cut -c1) diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml index 3d800618dde97..f70fefb111181 100644 --- a/.gitlab/setup/setup.yml +++ b/.gitlab/setup/setup.yml @@ -18,15 +18,15 @@ github_rate_limit_info: script: - python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client # Send stats for app 1 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 1 # Send stats for app 2 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 2 allow_failure: true diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml index 7fee28e886445..6b129d845ccf8 100644 --- a/.gitlab/source_test/golang_deps_diff.yml +++ b/.gitlab/source_test/golang_deps_diff.yml @@ -15,7 +15,7 @@ golang_deps_diff: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - inv -e diff.go-deps --report-file=deps-report.md --report-metrics --git-ref "${CI_COMMIT_REF_NAME}" artifacts: paths: @@ -64,7 +64,7 @@ golang_deps_send_count_metrics: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) - inv -e go-deps.send-count-metrics --git-sha "${CI_COMMIT_SHA}" --git-ref "${CI_COMMIT_REF_NAME}" golang_deps_test: diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 529ee8a9aeda1..a83becefe59bb 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -51,7 +51,7 @@ .upload_coverage: # Upload coverage files to Codecov. 
Never fail on coverage upload. - source /root/.bashrc - - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $CODECOV_TOKEN_SSM_NAME) + - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $CODECOV_TOKEN) - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true .linux_lint: @@ -267,7 +267,7 @@ new-e2e-unit-tests: before_script: # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml index f6ff9f865360e..dfa1d0e2aaeb8 100644 --- a/.gitlab/source_test/windows.yml +++ b/.gitlab/source_test/windows.yml @@ -39,11 +39,11 @@ -e EXTRA_OPTS="${FAST_TESTS_FLAG}" -e TEST_WASHER=true -e GO_TEST_SKIP_FLAKE="${GO_TEST_SKIP_FLAKE}" - -e API_KEY_ORG2_SSM_NAME="${API_KEY_ORG2_SSM_NAME}" - -e CODECOV_TOKEN_SSM_NAME="${CODECOV_TOKEN_SSM_NAME}" + -e API_KEY_ORG2="${API_KEY_ORG2}" + -e CODECOV_TOKEN="${CODECOV_TOKEN}" -e S3_PERMANENT_ARTIFACTS_URI="${S3_PERMANENT_ARTIFACTS_URI}" -e COVERAGE_CACHE_FLAG="${COVERAGE_CACHE_FLAG}" - -e GITLAB_TOKEN_SSM_NAME="${GITLAB_READ_API_TOKEN_SSM_NAME}" + -e GITLAB_TOKEN="${GITLAB_READ_API_TOKEN}" 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\unittests.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index 5084298b2adc0..3446bbf960627 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -19,7 +19,7 @@ # agent-release-management creates pipeline for both Agent 6 and Agent 7 # when triggered with major version 7 - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1 - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) - 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main" --variable ACTION --variable AUTO_RELEASE diff --git a/tasks/libs/common/omnibus.py b/tasks/libs/common/omnibus.py index c8c59478738e1..d91b2649d27ab 100644 --- a/tasks/libs/common/omnibus.py +++ b/tasks/libs/common/omnibus.py @@ -95,7 +95,7 @@ def env_filter(item): "BUILD_HOOK", "BUNDLE_MIRROR__RUBYGEMS__ORG", "BUCKET_BRANCH", - "CHANGELOG_COMMIT_SHA_SSM_NAME", + "CHANGELOG_COMMIT_SHA", "CLANG_LLVM_VER", "CHANNEL", "CHART", @@ -121,7 +121,7 @@ def env_filter(item): "HOSTNAME", "HOST_IP", "INFOPATH", - "INSTALL_SCRIPT_API_KEY_SSM_NAME", + "INSTALL_SCRIPT_API_KEY", "INTEGRATION_WHEELS_CACHE_BUCKET", "IRBRC", "KITCHEN_INFRASTRUCTURE_FLAKES_RETRY", @@ -159,11 +159,11 @@ def env_filter(item): "USERDOMAIN", "USERNAME", "USERPROFILE", - "VCPKG_BLOB_SAS_URL_SSM_NAME", + "VCPKG_BLOB_SAS_URL", "VERSION", "VM_ASSETS", "WIN_S3_BUCKET", - "WINGET_PAT_SSM_NAME", + "WINGET_PAT", "WORKFLOW", "_", "build_before", @@ -318,7 +318,7 @@ def send_build_metrics(ctx, overall_duration): } ) dd_api_key = ctx.run( - f'{aws_cmd} 
ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text', + f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text', hide=True, ).stdout.strip() headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key} @@ -336,7 +336,7 @@ def send_cache_miss_event(ctx, pipeline_id, job_name, job_id): else: aws_cmd = "aws" dd_api_key = ctx.run( - f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text', + f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text', hide=True, ).stdout.strip() headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key} diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 8072ba025d4c5..67300a91d3507 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -529,7 +529,7 @@ def changelog(ctx, new_commit_sha): else: parent_dir = os.getcwd() old_commit_sha = ctx.run( - f"{parent_dir}/tools/ci/aws_ssm_get_wrapper.sh {os.environ['CHANGELOG_COMMIT_SHA_SSM_NAME']}", + f"{parent_dir}/tools/ci/aws_ssm_get_wrapper.sh {os.environ['CHANGELOG_COMMIT_SHA']}", hide=True, ).stdout.strip() if not new_commit_sha: diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py index b2d05a6f8d2b0..5af0e1ffdd64f 100644 --- a/tasks/unit_tests/linter_tests.py +++ b/tasks/unit_tests/linter_tests.py @@ -32,7 +32,7 @@ def test_without_wrapper_no_env(self): def test_without_wrapper_with_env(self): with open(self.test_file, "w") as f: f.write( - " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2_SSM_NAME --with-decryption --query Parameter.Value --out text" + " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2 --with-decryption --query Parameter.Value --out text" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertFalse(matched.with_wrapper) @@ -49,7 +49,7 @@ def test_with_wrapper_no_env(self): def test_with_wrapper_with_env(self): with open(self.test_file, "w") as f: - f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $APP_KEY_ORG2_SSM_NAME)") + f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $APP_KEY_ORG2)") matched = linter.list_get_parameter_calls(self.test_file) self.assertListEqual([], matched) diff --git a/tasks/unit_tests/omnibus_tests.py b/tasks/unit_tests/omnibus_tests.py index 1d7438303eefd..1aeaf35e411f7 100644 --- a/tasks/unit_tests/omnibus_tests.py +++ b/tasks/unit_tests/omnibus_tests.py @@ -41,7 +41,7 @@ def _run_calls_to_string(mock_calls): 'CI_PIPELINE_ID': '', 'RELEASE_VERSION_7': 'nightly', 'S3_OMNIBUS_CACHE_BUCKET': 'omnibus-cache', - 'API_KEY_ORG2_SSM_NAME': 'api-key', + 'API_KEY_ORG2': 'api-key', }, clear=True, ) diff --git a/tasks/unit_tests/testdata/fake_gitlab-ci.yml b/tasks/unit_tests/testdata/fake_gitlab-ci.yml index a07aa2828d10c..06478a78e1dcb 100644 --- a/tasks/unit_tests/testdata/fake_gitlab-ci.yml +++ b/tasks/unit_tests/testdata/fake_gitlab-ci.yml @@ -174,15 +174,15 @@ variables: DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded DEB_GPG_KEY_ID: ad9589b7 DEB_GPG_KEY_NAME: "Datadog, Inc. 
Master key" - DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} - DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} + DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} + DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} RPM_GPG_KEY_ID: fd4bf915 RPM_GPG_KEY_NAME: "Datadog, Inc. RPM key" - RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} - RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} + RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} + RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # docker.io authentication - DOCKER_REGISTRY_LOGIN_SSM_KEY: docker_hub_login - DOCKER_REGISTRY_PWD_SSM_KEY: docker_hub_pwd + DOCKER_REGISTRY_LOGIN: docker_hub_login + DOCKER_REGISTRY_PWD: docker_hub_pwd DOCKER_REGISTRY_URL: docker.io KITCHEN_INFRASTRUCTURE_FLAKES_RETRY: 2 CLANG_LLVM_VER: 12.0.1 diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index 64e95dde53a07..c0faecf550fa8 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -63,7 +63,7 @@ $ErrorActionPreference = "Continue" $tmpfile = [System.IO.Path]::GetTempFileName() # 1. Upload coverage reports to Codecov -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CODECOV_TOKEN_SSM_NAME" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CODECOV_TOKEN" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } @@ -75,12 +75,12 @@ $Env:CODECOV_TOKEN=$(cat "$tmpfile") Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object { Copy-Item -Path $_.FullName -Destination C:\mnt } -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:API_KEY_ORG2_SSM_NAME" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:API_KEY_ORG2" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } $Env:DATADOG_API_KEY=$(cat "$tmpfile") -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:GITLAB_TOKEN_SSM_NAME" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:GITLAB_TOKEN" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } diff --git a/test/kitchen/tasks/clean.sh b/test/kitchen/tasks/clean.sh index 3aa774275039f..f407d31cf2a1d 100755 --- a/test/kitchen/tasks/clean.sh +++ b/test/kitchen/tasks/clean.sh @@ -8,19 +8,19 @@ set -euo pipefail # These should not be printed out if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + 
AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/test/kitchen/tasks/run-test-kitchen.sh b/test/kitchen/tasks/run-test-kitchen.sh index a88290161363e..a1b15fe997730 100755 --- a/test/kitchen/tasks/run-test-kitchen.sh +++ b/test/kitchen/tasks/run-test-kitchen.sh @@ -54,25 +54,25 @@ if [ "$KITCHEN_PROVIDER" == "azure" ]; then # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) # make sure whitespace is removed AZURE_CLIENT_ID="$(echo -e "${AZURE_CLIENT_ID}" | tr -d '[:space:]')" export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) # make sure whitespace is removed AZURE_CLIENT_SECRET="$(echo -e "${AZURE_CLIENT_SECRET}" | tr -d '[:space:]')" export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) # make sure whitespace is removed AZURE_TENANT_ID="$(echo -e "${AZURE_TENANT_ID}" | tr -d '[:space:]')" export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) # make sure whitespace is removed AZURE_SUBSCRIPTION_ID="$(echo -e "${AZURE_SUBSCRIPTION_ID}" | tr -d '[:space:]')" export AZURE_SUBSCRIPTION_ID @@ -101,7 +101,7 @@ elif [ "$KITCHEN_PROVIDER" == "ec2" ]; then export KITCHEN_EC2_SSH_KEY_ID="datadog-agent-kitchen" export KITCHEN_EC2_SSH_KEY_PATH="$(pwd)/aws-ssh-key" touch $KITCHEN_EC2_SSH_KEY_PATH && chmod 600 $KITCHEN_EC2_SSH_KEY_PATH - $PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_EC2_SSH_KEY_SSM_NAME > $KITCHEN_EC2_SSH_KEY_PATH + $PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_EC2_SSH_KEY > $KITCHEN_EC2_SSH_KEY_PATH fi fi diff --git a/test/kitchen/tasks/show-strays.sh b/test/kitchen/tasks/show-strays.sh index ab12b9a5edd5f..d5ea5a3315ef6 100755 --- a/test/kitchen/tasks/show-strays.sh +++ b/test/kitchen/tasks/show-strays.sh @@ -10,19 +10,19 @@ set -euo pipefail # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z 
${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index 81842426672b2..c74dbf79900b4 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -7,12 +7,12 @@ If ($lastExitCode -ne "0") { } # DockerHub login $tmpfile = [System.IO.Path]::GetTempFileName() -& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN_SSM_KEY" "$tmpfile" +& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN" "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } $DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") -& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD_SSM_KEY" "$tmpfile" +& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD" "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/tools/ci/junit_upload.sh b/tools/ci/junit_upload.sh index 68f839b21f0c6..3b2ea12aeb6db 100755 --- a/tools/ci/junit_upload.sh +++ b/tools/ci/junit_upload.sh @@ -6,8 +6,8 @@ if [[ -n "$1" ]]; then junit_files="$1" fi -GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$GITLAB_READ_API_TOKEN_SSM_NAME")" -DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$API_KEY_ORG2_SSM_NAME")" +GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$GITLAB_READ_API_TOKEN")" +DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$API_KEY_ORG2")" export DATADOG_API_KEY export GITLAB_TOKEN error=0 From e7306ace22122929bdfb0927fb295e1b1f6ef92a Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Mon, 9 Sep 2024 22:18:22 +0200 Subject: [PATCH 088/128] [CWS] optimize `os.Stat` in hash resolver because of heavy usage (#29120) --- pkg/security/resolvers/hash/resolver_linux.go | 6 +- pkg/security/utils/stat_unix.go | 59 +++++++++++++++++++ 2 files changed, 61 insertions(+), 4 deletions(-) create mode 100644 pkg/security/utils/stat_unix.go diff --git a/pkg/security/resolvers/hash/resolver_linux.go b/pkg/security/resolvers/hash/resolver_linux.go index 48b0d30fa3acb..d4ab18efb6f8d 100644 --- a/pkg/security/resolvers/hash/resolver_linux.go +++ b/pkg/security/resolvers/hash/resolver_linux.go @@ -18,7 +18,6 @@ import ( "io/fs" "os" "slices" - "syscall" "github.com/DataDog/datadog-go/v5/statsd" "github.com/glaslos/ssdeep" @@ -215,18 +214,17 @@ type fileUniqKey struct { } func getFileInfo(path string) (fs.FileMode, int64, fileUniqKey, error) { - fileInfo, err := os.Stat(path) + stat, err := utils.UnixStat(path) if err != nil { return 0, 0, fileUniqKey{}, err } - stat := fileInfo.Sys().(*syscall.Stat_t) fkey := fileUniqKey{ dev: stat.Dev, inode: stat.Ino, } - return fileInfo.Mode(), fileInfo.Size(), fkey, nil + return utils.UnixStatModeToGoFileMode(stat.Mode), stat.Size, fkey, nil } // hash hashes the provided file event diff --git a/pkg/security/utils/stat_unix.go b/pkg/security/utils/stat_unix.go new file mode 100644 index 0000000000000..4c12a3b6c8178 --- /dev/null +++ b/pkg/security/utils/stat_unix.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build unix + +// Package utils holds utils related files +package utils + +import ( + "io/fs" + "syscall" +) + +// UnixStat is an unix only equivalent to os.Stat, but alloc-free, +// and returning directly the platform-specific syscall.Stat_t structure. +func UnixStat(path string) (syscall.Stat_t, error) { + var stat syscall.Stat_t + var err error + for { + err = syscall.Stat(path, &stat) + if err != syscall.EINTR { + break + } + } + return stat, err +} + +// UnixStatModeToGoFileMode converts a Unix mode to a Go fs.FileMode. +func UnixStatModeToGoFileMode(mode uint32) fs.FileMode { + fsmode := fs.FileMode(mode & 0777) + switch mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fsmode |= fs.ModeDevice + case syscall.S_IFCHR: + fsmode |= fs.ModeDevice | fs.ModeCharDevice + case syscall.S_IFDIR: + fsmode |= fs.ModeDir + case syscall.S_IFIFO: + fsmode |= fs.ModeNamedPipe + case syscall.S_IFLNK: + fsmode |= fs.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fsmode |= fs.ModeSocket + } + if mode&syscall.S_ISGID != 0 { + fsmode |= fs.ModeSetgid + } + if mode&syscall.S_ISUID != 0 { + fsmode |= fs.ModeSetuid + } + if mode&syscall.S_ISVTX != 0 { + fsmode |= fs.ModeSticky + } + return fsmode +} From 8d2b5c8a01829dd943ba09c9c1bc429db7d6d2f2 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Mon, 9 Sep 2024 14:41:08 -0700 Subject: [PATCH 089/128] use merge-friendly way of adding vars to genpost (#29124) --- pkg/ebpf/cgo/genpost.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 5d8bb3ff811f8..62863734b35f2 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -13,6 +13,7 @@ import ( "os" "regexp" "runtime" + "strings" ) func main() { @@ -23,9 +24,24 @@ func main() { b = removeAbsolutePath(b, runtime.GOOS) + int8variableNames := []string{ + "Buf", + "Cgroup", + "Cgroup_name", + "LocalAddr", + "LocalAddress", + "Probe_id", + "RemoteAddr", + "RemoteAddress", + "Request_fragment", + "Topic_name", + "Trigger_comm", + "Victim_comm", + } + // Convert []int8 to []byte in multiple generated fields from the kernel, to simplify // conversion to string; see golang.org/issue/20753 - convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Victim_comm|Trigger_comm|LocalAddress|RemoteAddress|Probe_id)(\s+)\[(\d+)\]u?int8`) + convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) b, err = format.Source(b) From 9d70ef575fb853e5da6683537876bfee9cc31cf1 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Mon, 9 Sep 2024 14:41:12 -0700 Subject: [PATCH 090/128] move runtime compilation asset gitignore to subdir (#29125) --- .gitignore | 10 ---------- pkg/ebpf/bytecode/runtime/.gitignore | 12 ++++++++++++ 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index ab4936e5d46b1..83c6d28794cff 100644 --- a/.gitignore +++ b/.gitignore @@ -112,16 +112,6 @@ pkg/process/config/logs *.ninja compile_commands.json pkg/ebpf/bytecode/build/**/*.d -pkg/ebpf/bytecode/runtime/conntrack.go -pkg/ebpf/bytecode/runtime/http.go -pkg/ebpf/bytecode/runtime/usm.go -pkg/ebpf/bytecode/runtime/shared-libraries.go 
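
For the stat_unix.go file introduced in PATCH 088 above, a minimal, hypothetical caller in the style of the reworked getFileInfo shows the point of the helper: a single stat syscall, no fs.FileInfo allocation, and the Go file mode derived afterwards from the raw syscall.Stat_t. The standalone main and the sample path are illustrative only.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/security/utils"
)

func main() {
	// One alloc-free stat; the retry on EINTR happens inside UnixStat.
	stat, err := utils.UnixStat("/etc/hostname")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	// Same fields getFileInfo now reads: mode, size, and the dev/inode pair.
	fmt.Println(utils.UnixStatModeToGoFileMode(stat.Mode), stat.Size, stat.Dev, stat.Ino)
}
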
-pkg/ebpf/bytecode/runtime/offsetguess-test.go -pkg/ebpf/bytecode/runtime/oom-kill.go -pkg/ebpf/bytecode/runtime/runtime-security.go -pkg/ebpf/bytecode/runtime/tcp-queue-length.go -pkg/ebpf/bytecode/runtime/tracer.go -pkg/ebpf/bytecode/runtime/logdebug-test.go pkg/security/tests/syscall_tester/**/*.d # dsd artifacts diff --git a/pkg/ebpf/bytecode/runtime/.gitignore b/pkg/ebpf/bytecode/runtime/.gitignore index 520bd681081be..9b4fc67872634 100644 --- a/pkg/ebpf/bytecode/runtime/.gitignore +++ b/pkg/ebpf/bytecode/runtime/.gitignore @@ -1,2 +1,14 @@ *.d + +# runtime compilation asset integrity files +conntrack.go dynamicinstrumentation.go +http.go +logdebug-test.go +offsetguess-test.go +oom-kill.go +runtime-security.go +shared-libraries.go +tcp-queue-length.go +tracer.go +usm.go From eee5900fa10aa39daf952db14ff6297e1b054981 Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Tue, 10 Sep 2024 06:29:24 +0000 Subject: [PATCH 091/128] Bump the latest stable version (#29142) --- release.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.json b/release.json index c04669e36a18a..90949e61bc35a 100644 --- a/release.json +++ b/release.json @@ -3,7 +3,7 @@ "current_milestone": "7.59.0", "last_stable": { "6": "6.53.0", - "7": "7.56.2" + "7": "7.57.0" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", From 9f49bc9f7069b9c175e8607c8b07a3688c14c3bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 07:33:17 +0000 Subject: [PATCH 092/128] Bump golang.org/x/sys from 0.24.0 to 0.25.0 in /pkg/security/secl (#29194) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- pkg/security/secl/go.mod | 2 +- pkg/security/secl/go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b1f17ce865430..b0a3782d13455 100644 --- a/go.mod +++ b/go.mod @@ -305,7 +305,7 @@ require ( golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa golang.org/x/net v0.28.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 + golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 diff --git a/go.sum b/go.sum index a131d6f6b7cd3..cc6dcebb52cfb 100644 --- a/go.sum +++ b/go.sum @@ -3193,8 +3193,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 87b83d5e5f01c..6e3a2fde298b1 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -17,7 +17,7 @@ require ( github.com/spf13/cast v1.7.0 
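
Returning to the genpost.go change in PATCH 089: building the regular expression from a one-name-per-line slice is what makes future additions merge-friendly, since a new kernel field only touches a single line. A small self-contained demonstration of the resulting rewrite, using a made-up generated field as input:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Subset of the int8variableNames list from pkg/ebpf/cgo/genpost.go.
	int8variableNames := []string{"Buf", "Cgroup", "Topic_name"}
	re := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`)
	// Illustrative input, not taken from real cgo output.
	in := []byte("Topic_name [80]int8")
	fmt.Println(string(re.ReplaceAll(in, []byte("$1$2[$3]byte"))))
	// Prints: Topic_name [80]byte
}
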
github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/sys v0.24.0 + golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 golang.org/x/tools v0.24.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 4d9aec07ce4cb..18f2f039d30a1 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -75,8 +75,8 @@ golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= From b06e01a41117f6b11fd74848b4b2618bff8c07af Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Tue, 10 Sep 2024 07:53:35 +0000 Subject: [PATCH 093/128] Update the Windows DDNPM and DDPROCMON in the release.json file (#29072) --- release.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/release.json b/release.json index 90949e61bc35a..d9207c7ee2a8d 100644 --- a/release.json +++ b/release.json @@ -52,11 +52,11 @@ "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "6.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab" + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490" }, "release-a7": { "INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2", @@ -67,11 +67,11 @@ "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "7.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab" + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490" }, "dca-1.17.0": { "SECURITY_AGENT_POLICIES_VERSION": "v0.18.6" From b3a055e6533c593168a1cf58fbcb47064c553256 Mon Sep 17 00:00:00 2001 From: Jonathan Ribas Date: Tue, 10 Sep 2024 10:10:20 +0200 Subject: [PATCH 094/128] [CWS] Add auto-suppression guards (#29144) --- pkg/security/config/config.go | 2 ++ pkg/security/config/config_linux.go | 15 +++++++++++++++ 
pkg/security/config/config_others.go | 15 +++++++++++++++ .../rules/autosuppression/autosuppression.go | 6 ++++-- pkg/security/rules/engine.go | 2 ++ 5 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 pkg/security/config/config_linux.go create mode 100644 pkg/security/config/config_others.go diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index f854a755126ec..95a261f32963d 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -519,6 +519,8 @@ func (c *RuntimeSecurityConfig) sanitize() error { return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.executable.max_allowed: %d", c.EnforcementDisarmerExecutableMaxAllowed) } + c.sanitizePlatform() + return c.sanitizeRuntimeSecurityConfigActivityDump() } diff --git a/pkg/security/config/config_linux.go b/pkg/security/config/config_linux.go new file mode 100644 index 0000000000000..02062f9def762 --- /dev/null +++ b/pkg/security/config/config_linux.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package config holds config related files +package config + +func (c *RuntimeSecurityConfig) sanitizePlatform() { + // Force the disable of features unavailable on EBPFLess + if c.EBPFLessEnabled { + c.ActivityDumpEnabled = false + c.SecurityProfileEnabled = false + } +} diff --git a/pkg/security/config/config_others.go b/pkg/security/config/config_others.go new file mode 100644 index 0000000000000..91da3e31277fb --- /dev/null +++ b/pkg/security/config/config_others.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !linux + +// Package config holds config related files +package config + +func (c *RuntimeSecurityConfig) sanitizePlatform() { + // Force the disable of features unavailable on Windows + c.ActivityDumpEnabled = false + c.SecurityProfileEnabled = false +} diff --git a/pkg/security/rules/autosuppression/autosuppression.go b/pkg/security/rules/autosuppression/autosuppression.go index 168eb23087be6..56f9d0b28ce34 100644 --- a/pkg/security/rules/autosuppression/autosuppression.go +++ b/pkg/security/rules/autosuppression/autosuppression.go @@ -38,7 +38,9 @@ const ( // Opts holds options for auto suppression type Opts struct { + SecurityProfileEnabled bool SecurityProfileAutoSuppressionEnabled bool + ActivityDumpEnabled bool ActivityDumpAutoSuppressionEnabled bool EventTypes []model.EventType } @@ -68,7 +70,7 @@ func (as *AutoSuppression) Init(opts Opts) { // Suppresses returns true if the event should be suppressed for the given rule, false otherwise. 
It also counts statistics depending on this result func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool { if isAllowAutosuppressionRule(rule) && event.ContainerContext.ContainerID != "" && slices.Contains(as.opts.EventTypes, event.GetEventType()) { - if as.opts.ActivityDumpAutoSuppressionEnabled { + if as.opts.ActivityDumpEnabled && as.opts.ActivityDumpAutoSuppressionEnabled { if event.HasActiveActivityDump() { as.count(rule.ID, activityDumpSuppressionType) return true @@ -77,7 +79,7 @@ func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool return true } } - if as.opts.SecurityProfileAutoSuppressionEnabled { + if as.opts.SecurityProfileEnabled && as.opts.SecurityProfileAutoSuppressionEnabled { if event.IsInProfile() { as.count(rule.ID, securityProfileSuppressionType) return true diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 03ee84af37c56..2c24d48d230a8 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -88,7 +88,9 @@ func NewRuleEngine(evm *eventmonitor.EventMonitor, config *config.RuntimeSecurit } engine.AutoSuppression.Init(autosuppression.Opts{ + SecurityProfileEnabled: config.SecurityProfileEnabled, SecurityProfileAutoSuppressionEnabled: config.SecurityProfileAutoSuppressionEnabled, + ActivityDumpEnabled: config.ActivityDumpEnabled, ActivityDumpAutoSuppressionEnabled: config.ActivityDumpAutoSuppressionEnabled, EventTypes: config.SecurityProfileAutoSuppressionEventTypes, }) From e781598970bcc472463e1d901a0ed20b3dc0c672 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 10 Sep 2024 10:37:25 +0200 Subject: [PATCH 095/128] [CWS] directly call `filepath.Join` instead of `HostProc` in utils (#29158) --- pkg/security/utils/proc_linux.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/security/utils/proc_linux.go b/pkg/security/utils/proc_linux.go index cfb0894125b81..6746091231e65 100644 --- a/pkg/security/utils/proc_linux.go +++ b/pkg/security/utils/proc_linux.go @@ -12,6 +12,7 @@ import ( "io" "os" "path" + "path/filepath" "regexp" "strconv" "strings" @@ -122,17 +123,20 @@ func ProcRootFilePath(pid uint32, file string) string { return procPidPath2(pid, "root", file) } +// we do not use `HostProc` here because of the double call to `filepath.Join` +// and those functions can be called in a tight loop + func procPidPath(pid uint32, path string) string { - return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path) + return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path) } func procPidPath2(pid uint32, path1 string, path2 string) string { - return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path1, path2) + return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path1, path2) } // ModulesPath returns the path to the modules file in /proc func ModulesPath() string { - return kernel.HostProc("modules") + return filepath.Join(kernel.ProcFSRoot(), "modules") } // GetLoginUID returns the login uid of the provided process From 3d2edde971ae1e0936ceed8c0368e0c3c68f6910 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 08:59:40 +0000 Subject: [PATCH 096/128] Bump golang.org/x/text from 0.17.0 to 0.18.0 in /pkg/security/secl (#29193) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- pkg/security/secl/go.mod | 2 +- 
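
The comment added to proc_linux.go in PATCH 095 above gives the rationale: kernel.HostProc costs a double call to filepath.Join, and procPidPath and friends can run in tight loops. A rough micro-benchmark sketch of that difference follows; hostProc is a local stand-in written for the comparison, assuming the helper joins its arguments before joining them onto a /proc root, and is not the real kernel package API. It would live in a _test.go file and run with `go test -bench .`.

package procbench

import (
	"path/filepath"
	"testing"
)

// hostProc imitates a helper that performs two filepath.Join calls:
// one over its arguments and one onto the procfs root.
func hostProc(parts ...string) string {
	return filepath.Join("/proc", filepath.Join(parts...))
}

func BenchmarkDoubleJoin(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = hostProc("1234", "root", "cwd")
	}
}

func BenchmarkSingleJoin(b *testing.B) {
	// Shape of the new procPidPath/procPidPath2: one Join over all parts.
	for i := 0; i < b.N; i++ {
		_ = filepath.Join("/proc", "1234", "root", "cwd")
	}
}
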
pkg/security/secl/go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b0a3782d13455..585d57758a9c6 100644 --- a/go.mod +++ b/go.mod @@ -306,7 +306,7 @@ require ( golang.org/x/net v0.28.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 - golang.org/x/text v0.17.0 + golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 diff --git a/go.sum b/go.sum index cc6dcebb52cfb..4930ad6b3d441 100644 --- a/go.sum +++ b/go.sum @@ -3232,8 +3232,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 6e3a2fde298b1..20522add6cd6e 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -18,7 +18,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/sys v0.25.0 - golang.org/x/text v0.17.0 + golang.org/x/text v0.18.0 golang.org/x/tools v0.24.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 18f2f039d30a1..108e97fca09cc 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -77,8 +77,8 @@ golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From b19bbcf605e585e5938d6dedd30f9f68cbfb7ae1 Mon Sep 17 00:00:00 2001 From: Jade Guiton Date: Tue, 10 Sep 2024 11:23:59 +0200 Subject: [PATCH 097/128] [OTEL-1282] Replace logging-exporter with debug-exporter (#29044) Co-authored-by: Austin Lai <76412946+alai97@users.noreply.github.com> --- LICENSE-3rdparty.csv | 3 - cmd/serverless/dependencies_linux_amd64.txt | 6 +- cmd/serverless/dependencies_linux_arm64.txt | 6 +- .../collector-contrib/impl/components.go | 2 - comp/otelcol/collector-contrib/impl/go.mod | 1 - 
comp/otelcol/collector-contrib/impl/go.sum | 2 - .../collector-contrib/impl/manifest.yaml | 1 - comp/otelcol/otlp/collector.go | 37 +++----- comp/otelcol/otlp/config_test.go | 14 +-- comp/otelcol/otlp/map_provider.go | 14 +-- .../otlp/map_provider_not_serverless_test.go | 94 +++++++++---------- .../otlp/map_provider_serverless_test.go | 44 ++++----- .../testdata/debug/loglevel_disabled.yaml | 3 - .../testdata/debug/verbosity_detailed.yaml | 3 + ...oglevel_debug.yaml => verbosity_none.yaml} | 2 +- .../otlp/testdata/metrics/allconfig.yaml | 2 +- go.mod | 5 +- go.sum | 2 - pkg/config/config_template.yaml | 13 +-- pkg/config/setup/otlp.go | 1 - ...ace-logging-exporter-82062f8c865cb529.yaml | 16 ++++ .../collector/valid_datadog_manifest.yaml | 1 - .../containers/otlp_sender/cmd/sender/main.go | 4 +- test/e2e/containers/otlp_sender/go.mod | 2 +- test/e2e/containers/otlp_sender/go.sum | 4 +- .../datadog-agent/datadog.yaml | 2 +- 26 files changed, 134 insertions(+), 150 deletions(-) delete mode 100644 comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml create mode 100644 comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml rename comp/otelcol/otlp/testdata/debug/{loglevel_debug.yaml => verbosity_none.yaml} (52%) create mode 100644 releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 219896d013391..f8555a000f46d 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -2352,12 +2352,9 @@ core,go.opentelemetry.io/collector/exporter/exporterbatcher,Apache-2.0,Copyright core,go.opentelemetry.io/collector/exporter/exporterhelper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterqueue,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/internal/common,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/experr,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/otlptext,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/queue,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/loggingexporter,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlpexporter,Apache-2.0,Copyright The OpenTelemetry Authors diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index 0817058707727..c31cb5dcebd87 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -580,16 +580,16 @@ go.opentelemetry.io/collector/connector go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/exporter +go.opentelemetry.io/collector/exporter/debugexporter +go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata +go.opentelemetry.io/collector/exporter/debugexporter/internal/normal 
go.opentelemetry.io/collector/exporter/exporterbatcher go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata go.opentelemetry.io/collector/exporter/exporterqueue -go.opentelemetry.io/collector/exporter/internal/common go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/otlptext go.opentelemetry.io/collector/exporter/internal/queue -go.opentelemetry.io/collector/exporter/loggingexporter -go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata go.opentelemetry.io/collector/extension diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index 31d6006a051e9..afc9f4e4fa881 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -579,16 +579,16 @@ go.opentelemetry.io/collector/connector go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/exporter +go.opentelemetry.io/collector/exporter/debugexporter +go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata +go.opentelemetry.io/collector/exporter/debugexporter/internal/normal go.opentelemetry.io/collector/exporter/exporterbatcher go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata go.opentelemetry.io/collector/exporter/exporterqueue -go.opentelemetry.io/collector/exporter/internal/common go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/otlptext go.opentelemetry.io/collector/exporter/internal/queue -go.opentelemetry.io/collector/exporter/loggingexporter -go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata go.opentelemetry.io/collector/extension diff --git a/comp/otelcol/collector-contrib/impl/components.go b/comp/otelcol/collector-contrib/impl/components.go index 28ac885df9fa7..9f27554af7d37 100644 --- a/comp/otelcol/collector-contrib/impl/components.go +++ b/comp/otelcol/collector-contrib/impl/components.go @@ -38,7 +38,6 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" debugexporter "go.opentelemetry.io/collector/exporter/debugexporter" - loggingexporter "go.opentelemetry.io/collector/exporter/loggingexporter" nopexporter "go.opentelemetry.io/collector/exporter/nopexporter" otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter" otlphttpexporter "go.opentelemetry.io/collector/exporter/otlphttpexporter" @@ -88,7 +87,6 @@ func components() (otelcol.Factories, error) { factories.Exporters, err = exporter.MakeFactoryMap( debugexporter.NewFactory(), - loggingexporter.NewFactory(), nopexporter.NewFactory(), otlpexporter.NewFactory(), otlphttpexporter.NewFactory(), diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod index 96fb8ef3d6823..6342ac0e8bca0 100644 --- a/comp/otelcol/collector-contrib/impl/go.mod +++ b/comp/otelcol/collector-contrib/impl/go.mod @@ -38,7 +38,6 @@ require ( go.opentelemetry.io/collector/connector v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - 
go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/comp/otelcol/collector-contrib/impl/go.sum b/comp/otelcol/collector-contrib/impl/go.sum index b4814e7aa6f0c..9910d38ca9f68 100644 --- a/comp/otelcol/collector-contrib/impl/go.sum +++ b/comp/otelcol/collector-contrib/impl/go.sum @@ -928,8 +928,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= diff --git a/comp/otelcol/collector-contrib/impl/manifest.yaml b/comp/otelcol/collector-contrib/impl/manifest.yaml index 092ea7cb47a3c..69f4208a1de98 100644 --- a/comp/otelcol/collector-contrib/impl/manifest.yaml +++ b/comp/otelcol/collector-contrib/impl/manifest.yaml @@ -18,7 +18,6 @@ extensions: exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go index e3978764031e5..96787d7a53196 100644 --- a/comp/otelcol/otlp/collector.go +++ b/comp/otelcol/otlp/collector.go @@ -12,9 +12,10 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol" @@ -106,7 +107,7 @@ func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message exporterFactories := []exporter.Factory{ otlpexporter.NewFactory(), serializerexporter.NewFactory(s, &tagEnricher{cardinality: types.LowCardinality}, hostname.Get, nil, nil), - loggingexporter.NewFactory(), + debugexporter.NewFactory(), } if logsAgentChannel != nil { @@ -163,30 +164,20 @@ type PipelineConfig struct { Metrics map[string]interface{} } -// valid values for debug log level. -var debugLogLevelMap = map[string]struct{}{ - "disabled": {}, - "debug": {}, - "info": {}, - "warn": {}, - "error": {}, -} - // shouldSetLoggingSection returns whether debug logging is enabled. 
-// If an invalid loglevel value is set, it assumes debug logging is disabled. -// If the special 'disabled' value is set, it returns false. -// Otherwise it returns true and lets the Collector handle the rest. +// Debug logging is enabled when verbosity is set to a valid value except for "none", or left unset. func (p *PipelineConfig) shouldSetLoggingSection() bool { - // Legacy behavior: keep it so that we support `loglevel: disabled`. - if v, ok := p.Debug["loglevel"]; ok { - if s, ok := v.(string); ok { - _, ok := debugLogLevelMap[s] - return ok && s != "disabled" - } + v, ok := p.Debug["verbosity"] + if !ok { + return true } - - // If the legacy behavior does not apply, we always want to set the logging section. - return true + s, ok := v.(string) + if !ok { + return false + } + var level configtelemetry.Level + err := level.UnmarshalText([]byte(s)) + return err == nil && s != "none" } // Pipeline is an OTLP pipeline. diff --git a/comp/otelcol/otlp/config_test.go b/comp/otelcol/otlp/config_test.go index 593c27e495cfc..faa2223c8c661 100644 --- a/comp/otelcol/otlp/config_test.go +++ b/comp/otelcol/otlp/config_test.go @@ -304,7 +304,7 @@ func TestFromEnvironmentVariables(t *testing.T) { name: "only gRPC, disabled logging", env: map[string]string{ "DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT": "0.0.0.0:9999", - "DD_OTLP_CONFIG_DEBUG_LOGLEVEL": "disabled", + "DD_OTLP_CONFIG_DEBUG_VERBOSITY": "none", }, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{ @@ -325,7 +325,7 @@ func TestFromEnvironmentVariables(t *testing.T) { "apm_stats_receiver_addr": "http://localhost:8126/v0.6/stats", }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, }, @@ -475,7 +475,7 @@ func TestFromAgentConfigMetrics(t *testing.T) { "tags": "tag1:value1,tag2:value2", }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, }, @@ -520,7 +520,7 @@ func TestFromAgentConfigDebug(t *testing.T) { }, }, { - path: "debug/loglevel_debug.yaml", + path: "debug/verbosity_detailed.yaml", shouldSet: true, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{}, @@ -529,7 +529,7 @@ func TestFromAgentConfigDebug(t *testing.T) { MetricsEnabled: true, TracesEnabled: true, LogsEnabled: false, - Debug: map[string]interface{}{"loglevel": "debug"}, + Debug: map[string]interface{}{"verbosity": "detailed"}, Metrics: map[string]interface{}{ "enabled": true, "tag_cardinality": "low", @@ -538,7 +538,7 @@ func TestFromAgentConfigDebug(t *testing.T) { }, }, { - path: "debug/loglevel_disabled.yaml", + path: "debug/verbosity_none.yaml", shouldSet: false, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{}, @@ -547,7 +547,7 @@ func TestFromAgentConfigDebug(t *testing.T) { MetricsEnabled: true, TracesEnabled: true, LogsEnabled: false, - Debug: map[string]interface{}{"loglevel": "disabled"}, + Debug: map[string]interface{}{"verbosity": "none"}, Metrics: map[string]interface{}{ "enabled": true, "tag_cardinality": "low", diff --git a/comp/otelcol/otlp/map_provider.go b/comp/otelcol/otlp/map_provider.go index ed443ff15ca56..0c65bcc0fa99d 100644 --- a/comp/otelcol/otlp/map_provider.go +++ b/comp/otelcol/otlp/map_provider.go @@ -94,31 +94,31 @@ func buildMap(cfg PipelineConfig) (*confmap.Conf, error) { if cfg.shouldSetLoggingSection() { m := map[string]interface{}{ "exporters": map[string]interface{}{ - "logging": cfg.Debug, + "debug": cfg.Debug, }, } if cfg.MetricsEnabled { key := buildKey("service", "pipelines", "metrics", "exporters") if v, ok 
:= retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = []interface{}{"logging"} + m[key] = []interface{}{"debug"} } } if cfg.TracesEnabled { key := buildKey("service", "pipelines", "traces", "exporters") if v, ok := retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = []interface{}{"logging"} + m[key] = []interface{}{"debug"} } } if cfg.LogsEnabled { key := buildKey("service", "pipelines", "logs", "exporters") if v, ok := retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = []interface{}{"logging"} + m[key] = []interface{}{"debug"} } } errs = append(errs, retMap.Merge(confmap.NewFromStringMap(m))) diff --git a/comp/otelcol/otlp/map_provider_not_serverless_test.go b/comp/otelcol/otlp/map_provider_not_serverless_test.go index 9b9624e405f06..4ee1a08a38efe 100644 --- a/comp/otelcol/otlp/map_provider_not_serverless_test.go +++ b/comp/otelcol/otlp/map_provider_not_serverless_test.go @@ -37,7 +37,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -91,7 +91,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -150,7 +150,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP, metrics and traces, invalid loglevel(ignored)", + name: "only HTTP, metrics and traces, invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -167,7 +167,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: map[string]interface{}{ @@ -232,7 +232,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -288,7 +288,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -333,13 +333,13 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, only Traces, logging info", + name: "only gRPC, only Traces, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -363,8 +363,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - "loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, }, "service": map[string]interface{}{ @@ -372,14 +372,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, }, }, }, }, { - name: "only HTTP, only metrics, logging debug", + name: "only HTTP, only metrics, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -394,7 +394,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: 
map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -424,8 +424,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, }, "service": map[string]interface{}{ @@ -434,14 +434,14 @@ func TestNewMap(t *testing.T) { "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and traces, logging warn", + name: "only HTTP, metrics and traces, logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -457,7 +457,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -497,8 +497,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, }, "service": map[string]interface{}{ @@ -506,12 +506,12 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, @@ -525,7 +525,7 @@ func TestNewMap(t *testing.T) { TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -592,7 +592,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -658,7 +658,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP; metrics, logs and traces; invalid loglevel(ignored)", + name: "only HTTP; metrics, logs and traces; invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -676,7 +676,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: map[string]interface{}{ @@ -749,7 +749,7 @@ func TestNewMap(t *testing.T) { TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -818,7 +818,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -870,14 +870,14 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, traces and logs, logging info", + name: "only gRPC, traces and logs, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -907,8 +907,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - 
"loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, "logsagent": interface{}(nil), }, @@ -917,19 +917,19 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and logs, logging debug", + name: "only HTTP, metrics and logs, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -945,7 +945,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -976,8 +976,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, "logsagent": interface{}(nil), }, @@ -987,19 +987,19 @@ func TestNewMap(t *testing.T) { "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, }, }, { - name: "only HTTP; metrics, traces, and logs; logging warn", + name: "only HTTP; metrics, traces, and logs; logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -1016,7 +1016,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -1057,8 +1057,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, "logsagent": interface{}(nil), }, @@ -1067,17 +1067,17 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, diff --git a/comp/otelcol/otlp/map_provider_serverless_test.go b/comp/otelcol/otlp/map_provider_serverless_test.go index 7e1721cd33883..99f58c78a1660 100644 --- a/comp/otelcol/otlp/map_provider_serverless_test.go +++ b/comp/otelcol/otlp/map_provider_serverless_test.go @@ -31,7 +31,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", 
+ "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -85,7 +85,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -138,7 +138,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP, metrics and traces, invalid loglevel(ignored)", + name: "only HTTP, metrics and traces, invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -155,7 +155,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: map[string]interface{}{ @@ -214,7 +214,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -270,7 +270,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -309,13 +309,13 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, only Traces, logging info", + name: "only gRPC, only Traces, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -339,8 +339,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - "loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, }, "service": map[string]interface{}{ @@ -348,14 +348,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, }, }, }, }, { - name: "only HTTP, only metrics, logging debug", + name: "only HTTP, only metrics, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -370,7 +370,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -395,8 +395,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, }, "service": map[string]interface{}{ @@ -404,14 +404,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and traces, logging warn", + name: "only HTTP, metrics and traces, logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -427,7 +427,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -462,8 +462,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, }, "service": 
map[string]interface{}{ @@ -471,11 +471,11 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml b/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml deleted file mode 100644 index 92576a13e2c0a..0000000000000 --- a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml +++ /dev/null @@ -1,3 +0,0 @@ -otlp_config: - debug: - loglevel: disabled diff --git a/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml new file mode 100644 index 0000000000000..ff66512c59706 --- /dev/null +++ b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml @@ -0,0 +1,3 @@ +otlp_config: + debug: + verbosity: detailed diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml similarity index 52% rename from comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml rename to comp/otelcol/otlp/testdata/debug/verbosity_none.yaml index 48df64859c790..bd281f2225170 100644 --- a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml +++ b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml @@ -1,3 +1,3 @@ otlp_config: debug: - loglevel: debug + verbosity: none diff --git a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml index 9982e9731a587..3d7b7849259ce 100644 --- a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml +++ b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml @@ -17,4 +17,4 @@ otlp_config: send_count_sum_metrics: true send_aggregation_metrics: true debug: - loglevel: debug + verbosity: detailed diff --git a/go.mod b/go.mod index 585d57758a9c6..0c50b1153c242 100644 --- a/go.mod +++ b/go.mod @@ -287,7 +287,7 @@ require ( go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/confmap v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/pdata v1.11.0 go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 @@ -605,6 +605,7 @@ require ( github.com/jellydator/ttlcache/v3 v3.3.0 github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649 github.com/lorenzosaino/go-sysctl v0.3.1 + go.opentelemetry.io/collector/config/configtelemetry v0.104.0 ) require ( @@ -966,11 +967,9 @@ require ( go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect go.opentelemetry.io/collector/config/internal v0.104.0 // indirect go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 // indirect go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 // indirect 
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 // indirect go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect diff --git a/go.sum b/go.sum index 4930ad6b3d441..36b36758b1e70 100644 --- a/go.sum +++ b/go.sum @@ -2695,8 +2695,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index 7b085ec603a1b..979f769cfff8e 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -4395,22 +4395,13 @@ api_key: ## Debug-specific configuration for OTLP ingest in the Datadog Agent. ## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation ## for a full list of available settings: - ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/loggingexporter#getting-started + ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter#getting-started # # debug: - - ## Deprecated (v[6/7].41.0) - use `verbosity` instead - ## @param loglevel - string - optional - default: none - ## @env DD_OTLP_CONFIG_DEBUG_LOGLEVEL - string - optional - default: none - ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. - ## Valid values are disabled, debug, info, error, warn. - # - # loglevel: info - ## @param verbosity - string - optional - default: normal ## @env DD_OTLP_CONFIG_DEBUG_VERBOSITY - string - optional - default: normal ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. - ## Valid values are basic, normal, detailed. + ## Valid values are basic, normal, detailed, none. # # verbosity: normal {{- if (eq .OS "windows")}} diff --git a/pkg/config/setup/otlp.go b/pkg/config/setup/otlp.go index 7ff9245d6196a..f6312342e319c 100644 --- a/pkg/config/setup/otlp.go +++ b/pkg/config/setup/otlp.go @@ -94,6 +94,5 @@ func setupOTLPEnvironmentVariables(config pkgconfigmodel.Setup) { config.BindEnv(OTLPSection + ".metrics.summaries.mode") // Debug settings - config.BindEnv(OTLPSection + ".debug.loglevel") // Deprecated config.BindEnv(OTLPSection + ".debug.verbosity") } diff --git a/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml new file mode 100644 index 0000000000000..ff993fc5f45d1 --- /dev/null +++ b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml @@ -0,0 +1,16 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +upgrade: + - | + Removed the deprecated config option ``otlp_config.debug.loglevel`` in favor of ``otlp_config.debug.verbosity``: + * ``loglevel: debug`` maps to ``verbosity: detailed`` + * ``loglevel: info`` maps to ``verbosity: normal`` + * ``loglevel: warn/error`` maps to ``verbosity: basic`` + * ``loglevel: disabled`` maps to ``verbosity: none`` + diff --git a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml index 3a5f0ec2d1bc7..74d4ee191925a 100644 --- a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml @@ -19,7 +19,6 @@ extensions: exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/test/e2e/containers/otlp_sender/cmd/sender/main.go b/test/e2e/containers/otlp_sender/cmd/sender/main.go index 242b6ef83f01d..b30813cc7f789 100644 --- a/test/e2e/containers/otlp_sender/cmd/sender/main.go +++ b/test/e2e/containers/otlp_sender/cmd/sender/main.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" "go.opentelemetry.io/collector/exporter/otlphttpexporter" "go.opentelemetry.io/collector/extension" @@ -40,7 +40,7 @@ func components() ( exporters, err := exporter.MakeFactoryMap( otlpexporter.NewFactory(), otlphttpexporter.NewFactory(), - loggingexporter.NewFactory(), + debugexporter.NewFactory(), ) errs = multierr.Append(errs, err) diff --git a/test/e2e/containers/otlp_sender/go.mod b/test/e2e/containers/otlp_sender/go.mod index 13a7945ebe571..3ae057624be2d 100644 --- a/test/e2e/containers/otlp_sender/go.mod +++ b/test/e2e/containers/otlp_sender/go.mod @@ -6,7 +6,7 @@ require ( go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/consumer v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 go.opentelemetry.io/collector/extension v0.104.0 diff --git a/test/e2e/containers/otlp_sender/go.sum b/test/e2e/containers/otlp_sender/go.sum index 7ab785e653595..5f88f0d5eafab 100644 --- a/test/e2e/containers/otlp_sender/go.sum +++ b/test/e2e/containers/otlp_sender/go.sum @@ -175,8 +175,8 @@ go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhR go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= go.opentelemetry.io/collector/exporter v0.104.0/go.mod 
h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= +go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= +go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= diff --git a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml index 35593c48fbd5c..755c07c819559 100644 --- a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml +++ b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml @@ -36,4 +36,4 @@ otlp_config: traces: enabled: true debug: - loglevel: info + verbosity: normal From 3231f4304d71074a9116184157832653902c171a Mon Sep 17 00:00:00 2001 From: Daniel Lavie Date: Tue, 10 Sep 2024 12:24:38 +0300 Subject: [PATCH 098/128] USMON-1210: Kafka monitoring fetch functions rename (#29147) --- .../ebpf/c/protocols/classification/defs.h | 8 +-- .../ebpf/c/protocols/kafka/kafka-parsing.h | 48 +++++++-------- pkg/network/protocols/ebpf_types.go | 20 +++---- pkg/network/protocols/ebpf_types_linux.go | 8 +-- pkg/network/protocols/kafka/protocol.go | 60 +++++++++---------- 5 files changed, 72 insertions(+), 72 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index d44d2adb8d8ce..823112a4fb7e1 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -143,10 +143,10 @@ typedef enum { PROG_HTTP2_EOS_PARSER, PROG_HTTP2_TERMINATION, PROG_KAFKA, - PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0, - PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12, - PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0, - PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12, + PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0, + PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12, + PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0, + PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12, PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0, PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9, PROG_KAFKA_TERMINATION, diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h index f20905e92e041..e55ab7954124d 100644 --- a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h +++ b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h @@ -1114,9 +1114,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (level) { case PARSER_LEVEL_RECORD_BATCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0; } break; case PARSER_LEVEL_PARTITION: @@ -1124,9 +1124,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (api_key) { 
case KAFKA_FETCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0; } break; case KAFKA_PRODUCE: @@ -1147,9 +1147,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (level) { case PARSER_LEVEL_RECORD_BATCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0; } break; case PARSER_LEVEL_PARTITION: @@ -1157,9 +1157,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (api_key) { case KAFKA_FETCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0; } break; case KAFKA_PRODUCE: @@ -1410,23 +1410,23 @@ static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb return 0; } -SEC("socket/kafka_response_partition_parser_v0") -int socket__kafka_response_partition_parser_v0(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_partition_parser_v0") +int socket__kafka_fetch_response_partition_parser_v0(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } -SEC("socket/kafka_response_partition_parser_v12") -int socket__kafka_response_partition_parser_v12(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_partition_parser_v12") +int socket__kafka_fetch_response_partition_parser_v12(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH); } -SEC("socket/kafka_response_record_batch_parser_v0") -int socket__kafka_response_record_batch_parser_v0(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_record_batch_parser_v0") +int socket__kafka_fetch_response_record_batch_parser_v0(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } -SEC("socket/kafka_response_record_batch_parser_v12") -int socket__kafka_response_record_batch_parser_v12(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_record_batch_parser_v12") +int socket__kafka_fetch_response_record_batch_parser_v12(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); } @@ -1460,23 +1460,23 @@ static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *c return 0; } -SEC("uprobe/kafka_tls_response_partition_parser_v0") -int uprobe__kafka_tls_response_partition_parser_v0(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_partition_parser_v0") +int uprobe__kafka_tls_fetch_response_partition_parser_v0(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_partition_parser_v12") -int uprobe__kafka_tls_response_partition_parser_v12(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_partition_parser_v12") +int uprobe__kafka_tls_fetch_response_partition_parser_v12(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 
12, 12, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_record_batch_parser_v0") -int uprobe__kafka_tls_response_record_batch_parser_v0(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v0") +int uprobe__kafka_tls_fetch_response_record_batch_parser_v0(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_record_batch_parser_v12") -int uprobe__kafka_tls_response_record_batch_parser_v12(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v12") +int uprobe__kafka_tls_fetch_response_record_batch_parser_v12(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); } diff --git a/pkg/network/protocols/ebpf_types.go b/pkg/network/protocols/ebpf_types.go index ab293e741290b..9fcf544843b86 100644 --- a/pkg/network/protocols/ebpf_types.go +++ b/pkg/network/protocols/ebpf_types.go @@ -47,17 +47,17 @@ const ( ProgramHTTP2Termination ProgramType = C.PROG_HTTP2_TERMINATION // ProgramKafka is the Golang representation of the C.PROG_KAFKA enum ProgramKafka ProgramType = C.PROG_KAFKA - // ProgramKafkaResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum - ProgramKafkaResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0 - // ProgramKafkaResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum - ProgramKafkaResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12 - // ProgramKafkaResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum - ProgramKafkaResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0 - // ProgramKafkaResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum - ProgramKafkaResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12 - // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + // ProgramKafkaFetchResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0 enum + ProgramKafkaFetchResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0 + // ProgramKafkaFetchResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12 enum + ProgramKafkaFetchResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12 + // ProgramKafkaFetchResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0 enum + ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0 + // ProgramKafkaFetchResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12 enum + ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12 + // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 enum ProgramKafkaProduceResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 - // ProgramKafkaProduceResponsePartitionParserV9 
is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + // ProgramKafkaProduceResponsePartitionParserV9 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 enum ProgramKafkaProduceResponsePartitionParserV9 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 // ProgramKafkaTermination is tail call to process Kafka termination. ProgramKafkaTermination ProgramType = C.PROG_KAFKA_TERMINATION diff --git a/pkg/network/protocols/ebpf_types_linux.go b/pkg/network/protocols/ebpf_types_linux.go index 24e84cd1b3511..9cc859e489174 100644 --- a/pkg/network/protocols/ebpf_types_linux.go +++ b/pkg/network/protocols/ebpf_types_linux.go @@ -36,13 +36,13 @@ const ( ProgramKafka ProgramType = 0x9 - ProgramKafkaResponsePartitionParserV0 ProgramType = 0xa + ProgramKafkaFetchResponsePartitionParserV0 ProgramType = 0xa - ProgramKafkaResponsePartitionParserV12 ProgramType = 0xb + ProgramKafkaFetchResponsePartitionParserV12 ProgramType = 0xb - ProgramKafkaResponseRecordBatchParserV0 ProgramType = 0xc + ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = 0xc - ProgramKafkaResponseRecordBatchParserV12 ProgramType = 0xd + ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = 0xd ProgramKafkaProduceResponsePartitionParserV0 ProgramType = 0xe diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go index 542d97367e9ce..41d0dc72b3854 100644 --- a/pkg/network/protocols/kafka/protocol.go +++ b/pkg/network/protocols/kafka/protocol.go @@ -40,12 +40,12 @@ const ( eventStreamName = "kafka" filterTailCall = "socket__kafka_filter" - responsePartitionParserV0TailCall = "socket__kafka_response_partition_parser_v0" - responsePartitionParserV12TailCall = "socket__kafka_response_partition_parser_v12" - responseRecordBatchParserV0TailCall = "socket__kafka_response_record_batch_parser_v0" - responseRecordBatchParserV12TailCall = "socket__kafka_response_record_batch_parser_v12" - ProduceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0" - ProduceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9" + fetchResponsePartitionParserV0TailCall = "socket__kafka_fetch_response_partition_parser_v0" + fetchResponsePartitionParserV12TailCall = "socket__kafka_fetch_response_partition_parser_v12" + fetchResponseRecordBatchParserV0TailCall = "socket__kafka_fetch_response_record_batch_parser_v0" + fetchResponseRecordBatchParserV12TailCall = "socket__kafka_fetch_response_record_batch_parser_v12" + produceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0" + produceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9" dispatcherTailCall = "socket__protocol_dispatcher_kafka" kafkaHeapMap = "kafka_heap" @@ -55,12 +55,12 @@ const ( tlsFilterTailCall = "uprobe__kafka_tls_filter" - tlsResponsePartitionParserV0TailCall = "uprobe__kafka_tls_response_partition_parser_v0" - tlsResponsePartitionParserV12TailCall = "uprobe__kafka_tls_response_partition_parser_v12" - tlsResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_response_record_batch_parser_v0" - tlsResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_response_record_batch_parser_v12" - tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0" - tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9" + 
tlsFetchResponsePartitionParserV0TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v0" + tlsFetchResponsePartitionParserV12TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v12" + tlsFetchResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v0" + tlsFetchResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v12" + tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0" + tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9" tlsTerminationTailCall = "uprobe__kafka_tls_termination" tlsDispatcherTailCall = "uprobe__tls_protocol_dispatcher_kafka" @@ -110,44 +110,44 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responsePartitionParserV0TailCall, + EBPFFuncName: fetchResponsePartitionParserV0TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responsePartitionParserV12TailCall, + EBPFFuncName: fetchResponsePartitionParserV12TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responseRecordBatchParserV0TailCall, + EBPFFuncName: fetchResponseRecordBatchParserV0TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responseRecordBatchParserV12TailCall, + EBPFFuncName: fetchResponseRecordBatchParserV12TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: ProduceResponsePartitionParserV0TailCall, + EBPFFuncName: produceResponsePartitionParserV0TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: ProduceResponsePartitionParserV9TailCall, + EBPFFuncName: produceResponsePartitionParserV9TailCall, }, }, { @@ -166,30 +166,30 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponsePartitionParserV0TailCall, + EBPFFuncName: tlsFetchResponsePartitionParserV0TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12), ProbeIdentificationPair: 
manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponsePartitionParserV12TailCall, + EBPFFuncName: tlsFetchResponsePartitionParserV12TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponseRecordBatchParserV0TailCall, + EBPFFuncName: tlsFetchResponseRecordBatchParserV0TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponseRecordBatchParserV12TailCall, + EBPFFuncName: tlsFetchResponseRecordBatchParserV12TailCall, }, }, { From c692f62423f93988b008b669008f9199a5ad196b Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Tue, 10 Sep 2024 11:32:40 +0200 Subject: [PATCH 099/128] [CWS] keep updating the cache of disarmed rules (#29203) --- pkg/security/probe/process_killer.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 7c9a54d785028..d9b80b6e4bec1 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -417,10 +417,6 @@ func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], typ disarmerTy kd.Lock() defer kd.Unlock() - if kd.disarmed { - return false - } - if cache == nil { return true } From 53fd0f00fed070bc883f805a263856c17d824a73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:13:20 +0000 Subject: [PATCH 100/128] Bump golang.org/x/tools from 0.24.0 to 0.25.0 in /pkg/security/secl (#29195) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- pkg/security/secl/go.mod | 4 ++-- pkg/security/secl/go.sum | 8 ++++---- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 0c50b1153c242..505850e83ed4c 100644 --- a/go.mod +++ b/go.mod @@ -303,12 +303,12 @@ require ( go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d golang.org/x/arch v0.9.0 golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa - golang.org/x/net v0.28.0 + golang.org/x/net v0.29.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/grpc v1.66.0 @@ -568,10 +568,10 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.20.0 + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/mod v0.21.0 golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/term v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/api v0.185.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 36b36758b1e70..4f35200c1a2c2 100644 --- a/go.sum +++ b/go.sum @@ -2849,8 +2849,8 @@ golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2919,8 +2919,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -3005,8 +3005,8 @@ golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -3209,8 +3209,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3323,8 +3323,8 @@ golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 20522add6cd6e..a46de50a3a0b7 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -19,7 +19,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/sys v0.25.0 golang.org/x/text v0.18.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 modernc.org/mathutil v1.6.0 @@ -40,7 +40,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect ) diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 108e97fca09cc..d5c938d6789d2 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -71,16 +71,16 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod 
h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From 48257f86878ee8c7e98b92a0cd0a2e99601e0915 Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Tue, 10 Sep 2024 10:14:24 +0000 Subject: [PATCH 101/128] Configure the CI to run the e2e tests on GCP (#29198) --- .gitlab-ci.yml | 1 + .gitlab/e2e/e2e.yml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 99a4d4ff8e646..bd2da974c7098 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -214,6 +214,7 @@ variables: E2E_TESTS_AZURE_CLIENT_SECRET: ci.datadog-agent.e2e_tests_azure_client_secret # agent-devx-loops E2E_TESTS_AZURE_TENANT_ID: ci.datadog-agent.e2e_tests_azure_tenant_id # agent-devx-loops E2E_TESTS_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.e2e_tests_azure_subscription_id # agent-devx-loops + E2E_TESTS_GCP_CREDENTIALS: ci.datadog-agent.e2e_tests_gcp_credentials # agent-devx-loops KITCHEN_EC2_SSH_KEY: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops KITCHEN_AZURE_CLIENT_ID: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops KITCHEN_AZURE_CLIENT_SECRET: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index e62ca3fd3f58b..06c4e2234f9a2 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -24,6 +24,9 @@ - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_SECRET) - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_TENANT_ID) - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) + # Setup GCP credentials. 
https://www.pulumi.com/registry/packages/gcp/installation-configuration/ + # The service account is called `agent-e2e-tests` + - export GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_GCP_CREDENTIALS) # Generate external links to CI VISIBILITY, used by artifacts:reports:annotations - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH variables: From d2e2032c39896c491805aba5e2fdfa7bf0070488 Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:14:58 +0000 Subject: [PATCH 102/128] [test-infra-definitions][automated] Bump test-infra-definitions to 7be84fb14a743e80b8916e20f0cecd85d26bfc2d (#29199) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 20 +++++++------- test/new-e2e/go.sum | 40 +++++++++++++-------------- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index f3612f51eea60..b956a16ab7c80 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: e0fb9ce404ac + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 7be84fb14a74 diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 41578f1a45b19..b237c08f61928 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -32,8 +32,8 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac - github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74 + github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 @@ -50,7 +50,7 @@ require ( github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 - github.com/pulumi/pulumi/sdk/v3 v3.130.0 + github.com/pulumi/pulumi/sdk/v3 v3.131.0 github.com/samber/lo v1.47.0 github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 @@ -87,11 +87,11 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.19 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 // indirect 
github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14 // indirect @@ -216,7 +216,6 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/texttheater/golang-levenshtein v1.0.1 // indirect github.com/tinylib/msgp v1.1.8 // indirect - github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect @@ -265,9 +264,10 @@ require ( require github.com/hairyhenderson/go-codeowners v0.5.0 require ( - github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 // indirect + github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect ) diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index a944c13f2e155..4103fed9d0cb4 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -14,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmi github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac h1:epY6p93MEB4W6ViKzxPhaFP8iWBybU6G2jWr4h+oHEQ= -github.com/DataDog/test-infra-definitions v0.0.0-20240906113819-e0fb9ce404ac/go.mod h1:QEQPOdzBcxZly/1KtAPFgF1R7Tp98FajB06gZ75E+/U= +github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74 h1:ebZDQrJIzcrVy/XpEzXSgw5ScJCtnRETbzo+3g3YsTc= +github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74/go.mod h1:orHExiPWWT9f68UJZ92oIVX1OcTNlKvtbX7b6HM9e0Q= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= @@ -45,8 +45,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= github.com/aws/aws-sdk-go-v2/config v1.27.19 h1:+DBS8gJP6VsxYkZ6UEV0/VsRM2rYpbQCYsosW9RRmeQ= @@ -55,18 +55,18 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.19 h1:R18G7nBBGLby51CFEqUBFF2IVl7 github.com/aws/aws-sdk-go-v2/credentials v1.17.19/go.mod h1:xr9kUMnaLTB866HItT6pg58JgiBP77fSQLBwIa//zk8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 h1:vVOuhRyslJ6T/HteG71ZWCTas1q2w6f0NKsNbkXHs/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6/go.mod h1:jimWaqLiT0sJGLh51dKCLLtExRYPtMU7MpxuCgtbkxg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 h1:DXFWyt7ymx/l1ygdyTTS0X923e+Q2wXIxConJzrgwc0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12/go.mod h1:mVOr/LbvaNySK1/BTy4cBOCjhCNY2raWBwK4v+WR5J4= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 h1:Rts0EZgdi3tneJMXp+uKrZHbMxQIu0y5O/2MG6a2+hY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2/go.mod h1:j0V2ahvdX3mGIyXQSe9vjdIQvSxz3uaMM0bR7Y+0WCE= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 h1:PxM8EHsv1sd9eWGamMQCvqBEjxytK5kAwjrxlfG3tac= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1/go.mod h1:kdk+WJbHcGVbIlRQfSrKyuKkbWDdD8I9NScyS5vZ8eQ= +github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 h1:nQAU2Yr+afkAvIV39mg7LrNYFNQP7ShwbmiJqx2fUKA= +github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4/go.mod h1:keOS9j4fv5ASh7dV29lIpGw2QgoJwGFAyMU0uPvfax4= github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 h1:DSFxt4HBQjlgKNMyYdME9cbB11FFi7umpTGbqJaS9nw= github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2/go.mod h1:er8WHbgZAl17Dmu41ifKmUrV7JPpiQnRc+XSrnu4qR8= github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 h1:onUAzZXDsyXzyrmOGw/9p8Csl1NZkTDEs4URZ8covUY= @@ -401,22 +401,24 @@ github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0 h1:DEbHd7krLB3p3Qr4PlAaEScA5mQR85jif github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0/go.mod h1:gN/y6Gl/c6R2m1H0DlpyeyxpemtLJNhgHWcYz+vBPdo= github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 h1:GknlrxIweg8X65VcxJaUVdZIHhclZjdzEWxsLGnMR2Y= github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0/go.mod h1:mB6jxy6GjMd1dmTA129GkHH5pyryYG/W0J1X2XznxW4= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 h1:0QxN2F/yiylylNjYMqqXc5RQoKan/Pq/x1v43QaxE/c= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0/go.mod 
h1:pv7oEJtA6Tn8dnE8/xya/yCQd6GU0Br9c9nHRkW9LiQ= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 h1:qCpKZQECnZWXVMWfuTk6nfPfQoP+7zXPS5bHdeIh5Mc= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0/go.mod h1:ILyyA8nuYMWOcU7sRqRVmakNeY4hxog7K4nMCL+IOjE= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws= github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 h1:ijcCyi+SPlJn3aIEb4p23FTk6fxjPLtVMhfkRaKp85A= github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0/go.mod h1:yQXpYXNeGVBcygd5Be/fzf+1Jcg4kDLAMZY6UDtIZvQ= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 h1:joRRPeYxXSaCGF7we0NNAMsOy7HJFd7O4cWAjmKveRI= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0/go.mod h1:XFiuqPmtOASRrKU1q29sgzAuq9OcZ0bDzdBw9TSUyyo= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 h1:1S0sh1N+9MV2eUOidjS1LA01eZ6x6j+I7G8CE0RBb8E= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0/go.mod h1:yVyaGAI0YLEcqfP/8Bmk9VgtRxE5JsBayO9i5QNneWg= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 h1:mqs2dlpcyYn2LsA20bC8xN30YaVs7x8M6tC7BtDiY64= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0/go.mod h1:OTv2GUMWUktcvdjXFRaAdJDW1f/SuRSCKHdVCcQrN7U= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 h1:Q+we+HFtnNGkeXIhdWIKJZWJRwaIBUuMcZKG70YIYyw= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0/go.mod h1:guTN5l9syK6v4+94APSi9np3rj1JPrPUEOG+B0dDaZE= github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0= github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5/go.mod h1:XZKLFXbw13olxuztlWnmVUPYZp2a+BqzqhuMl0j/Ow8= github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 h1:NeCKFxyOLpAaG4pJDk7+ewnCuV2IbXR7PggYSNujOno= github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8/go.mod h1:ARGNnIZENIpDUVSX21JEQJKrESj/0u0r0iT61rpb86I= +github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 h1:PUH/sUbJmBmHjNFNthJ/dW2+riFuJV0FhrGAwuUuRIg= +github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1/go.mod h1:OmZeji3dNMwB1qldAlaQfcfJPc2BaZyweVGH7Ej4SJg= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 h1:VDX+hu+qK3fbf2FodgG5kfh2h1bHK0FKirW1YqKWkRc= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1/go.mod h1:e69ohZtUePLLYNLXYgiOWp0FvRGg6ya/3fsq3o00nN0= github.com/pulumi/pulumi-libvirt/sdk v0.4.7 h1:/BBnqqx/Gbg2vINvJxXIVb58THXzw2lSqFqxlRSXH9M= @@ -425,8 +427,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.3 h1:nlN42MRSIuDh5Pc5nLq4b0lwZaX2ZU github.com/pulumi/pulumi-random/sdk/v4 v4.16.3/go.mod h1:yRfWJSLEAVZvkwgXajr3S9OmFkAZTxfO44Ef2HfixXQ= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng= -github.com/pulumi/pulumi/sdk/v3 v3.130.0 h1:gGJNd+akPqhZ+vrsZmAjSNJn6kGJkitjjkwrmIQMmn8= -github.com/pulumi/pulumi/sdk/v3 v3.130.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= +github.com/pulumi/pulumi/sdk/v3 v3.131.0 h1:w6+XFt4ajz7ZEoCBFo+oMmrQ4DYYBKtzuj/zBe/uyoo= +github.com/pulumi/pulumi/sdk/v3 v3.131.0/go.mod h1:J5kQEX8v87aeUhk6NdQXnjCo1DbiOnOiL3Sf2DuDda8= github.com/pulumiverse/pulumi-time/sdk v0.0.17 
h1:JNYVLglXeMAjyD3upIwKZ9o7MnNo7kc3FVsgxs7bc+A= github.com/pulumiverse/pulumi-time/sdk v0.0.17/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -479,8 +481,6 @@ github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqa github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68= -github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7/go.mod h1:UxoP3EypF8JfGEjAII8jx1q8rQyDnX8qdTCs/UQBVIE= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= From 1f82898da42118b177d189e6aea4276d7de0f922 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 12:26:31 +0000 Subject: [PATCH 103/128] Bump golang.org/x/arch from 0.9.0 to 0.10.0 (#29165) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 505850e83ed4c..7970eb5ca4f38 100644 --- a/go.mod +++ b/go.mod @@ -301,7 +301,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d - golang.org/x/arch v0.9.0 + golang.org/x/arch v0.10.0 golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa golang.org/x/net v0.29.0 golang.org/x/sync v0.8.0 diff --git a/go.sum b/go.sum index 4f35200c1a2c2..b2ad5c0549b99 100644 --- a/go.sum +++ b/go.sum @@ -2819,8 +2819,8 @@ go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1: go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= -golang.org/x/arch v0.9.0 h1:ub9TgUInamJ8mrZIGlBG6/4TqWeMszd4N8lNorbrr6k= -golang.org/x/arch v0.9.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.10.0 h1:S3huipmSclq3PJMNe76NGwkBR504WFkQ5dhzWzP8ZW8= +golang.org/x/arch v0.10.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= From 31d57c7ad1c59166cd6036b9793780c7270d6827 Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 12:26:35 +0000 Subject: [PATCH 104/128] [omnibus][automated] Bump OMNIBUS_SOFTWARE_VERSION (#28855) Co-authored-by: chouquette --- 
release.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release.json b/release.json index d9207c7ee2a8d..34d4e70efb1cd 100644 --- a/release.json +++ b/release.json @@ -7,7 +7,7 @@ }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5", + "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb", "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", @@ -26,7 +26,7 @@ }, "nightly-a7": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5", + "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb", "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", From 9610ef9ed94a362b9319819a03e325e76708a14e Mon Sep 17 00:00:00 2001 From: David Ortiz Date: Tue, 10 Sep 2024 14:27:54 +0200 Subject: [PATCH 105/128] [ksm] Init a single kubelet reflector for all pod collectors (#29145) --- pkg/kubestatemetrics/builder/builder.go | 30 ++- pkg/kubestatemetrics/builder/kubelet_pods.go | 92 ++++++-- .../builder/kubelet_pods_stub.go | 19 +- .../builder/kubelet_pods_test.go | 200 +++++++++++------- 4 files changed, 247 insertions(+), 94 deletions(-) diff --git a/pkg/kubestatemetrics/builder/builder.go b/pkg/kubestatemetrics/builder/builder.go index 148318bc1d6fb..56fa8ff3e0c19 100644 --- a/pkg/kubestatemetrics/builder/builder.go +++ b/pkg/kubestatemetrics/builder/builder.go @@ -50,6 +50,7 @@ type Builder struct { collectPodsFromKubelet bool collectOnlyUnassignedPods bool + KubeletReflector *kubeletReflector } // New returns new Builder instance @@ -161,7 +162,17 @@ func (b *Builder) Build() metricsstore.MetricsWriterList { // BuildStores initializes and registers all enabled stores. // It returns metric cache stores. func (b *Builder) BuildStores() [][]cache.Store { - return b.ksmBuilder.BuildStores() + stores := b.ksmBuilder.BuildStores() + + if b.KubeletReflector != nil { + // Starting the reflector here allows us to start just one for all stores. + err := b.KubeletReflector.start(b.ctx) + if err != nil { + log.Errorf("Failed to start the kubelet reflector: %s", err) + } + } + + return stores } // WithResync is used if a resync period is configured @@ -302,7 +313,22 @@ func (c *cacheEnabledListerWatcher) List(options v1.ListOptions) (runtime.Object func handlePodCollection[T any](b *Builder, store cache.Store, client T, listWatchFunc func(kubeClient T, ns string, fieldSelector string) cache.ListerWatcher, namespace string, useAPIServerCache bool) { if b.collectPodsFromKubelet { - b.startKubeletPodWatcher(store, namespace) + if b.KubeletReflector == nil { + kr, err := newKubeletReflector(b.namespaces) + if err != nil { + log.Errorf("Failed to create kubeletReflector: %s", err) + return + } + b.KubeletReflector = &kr + } + + err := b.KubeletReflector.addStore(store) + if err != nil { + log.Errorf("Failed to add store to kubeletReflector: %s", err) + return + } + + // The kubelet reflector will be started when all stores are added. 
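// (BuildStores starts the shared reflector exactly once, after every enabled store has registered itself via addStore.)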
return } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods.go b/pkg/kubestatemetrics/builder/kubelet_pods.go index ce7af8ce6683c..c0a50018c110a 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods.go @@ -10,6 +10,7 @@ package builder import ( "context" "fmt" + "slices" "strings" "time" @@ -22,57 +23,107 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// PodWatcher is an interface for a component that watches for changes in pods -type PodWatcher interface { +const ( + podWatcherExpiryDuration = 15 * time.Second + updateStoresPeriod = 5 * time.Second +) + +// podWatcher is an interface for a component that watches for changes in pods +type podWatcher interface { PullChanges(ctx context.Context) ([]*kubelet.Pod, error) Expire() ([]string, error) } -func (b *Builder) startKubeletPodWatcher(store cache.Store, namespace string) { - podWatcher, err := kubelet.NewPodWatcher(15 * time.Second) +type kubeletReflector struct { + namespaces []string + watchAllNamespaces bool + podWatcher podWatcher + + // Having an array of stores allows us to have a single watcher for all the + // collectors configured (by default it's the pods one plus "pods_extended") + stores []cache.Store + + started bool +} + +func newKubeletReflector(namespaces []string) (kubeletReflector, error) { + watcher, err := kubelet.NewPodWatcher(podWatcherExpiryDuration) if err != nil { - log.Warnf("Failed to create pod watcher: %s", err) + return kubeletReflector{}, fmt.Errorf("failed to create kubelet-based reflector: %w", err) + } + + watchAllNamespaces := slices.Contains(namespaces, corev1.NamespaceAll) + + return kubeletReflector{ + namespaces: namespaces, + watchAllNamespaces: watchAllNamespaces, + podWatcher: watcher, + }, nil +} + +func (kr *kubeletReflector) addStore(store cache.Store) error { + if kr.started { + return fmt.Errorf("cannot add store after reflector has started") } - ticker := time.NewTicker(5 * time.Second) + kr.stores = append(kr.stores, store) + + return nil +} + +// start starts the reflector. 
It should be called only once after all the +// stores have been added +func (kr *kubeletReflector) start(context context.Context) error { + if kr.started { + return fmt.Errorf("reflector already started") + } + + kr.started = true + + ticker := time.NewTicker(updateStoresPeriod) go func() { for { select { case <-ticker.C: - err = updateStore(b.ctx, store, podWatcher, namespace) + err := kr.updateStores(context) if err != nil { - log.Errorf("Failed to update store: %s", err) + log.Errorf("Failed to update stores: %s", err) } - case <-b.ctx.Done(): + case <-context.Done(): ticker.Stop() return } } }() + + return nil } -func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, namespace string) error { - pods, err := podWatcher.PullChanges(ctx) +func (kr *kubeletReflector) updateStores(ctx context.Context) error { + pods, err := kr.podWatcher.PullChanges(ctx) if err != nil { return fmt.Errorf("failed to pull changes from pod watcher: %w", err) } for _, pod := range pods { - if namespace != corev1.NamespaceAll && pod.Metadata.Namespace != namespace { + if !kr.watchAllNamespaces && !slices.Contains(kr.namespaces, pod.Metadata.Namespace) { continue } kubePod := kubelet.ConvertKubeletPodToK8sPod(pod) - err = store.Add(kubePod) - if err != nil { - log.Warnf("Failed to add pod to KSM store: %s", err) + for _, store := range kr.stores { + err := store.Add(kubePod) + if err != nil { + // log instead of returning error to continue updating other stores + log.Warnf("Failed to add pod to store: %s", err) + } } } - expiredEntities, err := podWatcher.Expire() + expiredEntities, err := kr.podWatcher.Expire() if err != nil { return fmt.Errorf("failed to expire pods: %w", err) } @@ -91,9 +142,12 @@ func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, }, } - err = store.Delete(&expiredPod) - if err != nil { - log.Warnf("Failed to delete pod from KSM store: %s", err) + for _, store := range kr.stores { + err := store.Delete(&expiredPod) + if err != nil { + // log instead of returning error to continue updating other stores + log.Warnf("Failed to delete pod from store: %s", err) + } } } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go index b4da17ab6227d..7682655232056 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go @@ -8,9 +8,24 @@ package builder import ( + "context" + "k8s.io/client-go/tools/cache" ) -func (b *Builder) startKubeletPodWatcher(_ cache.Store, _ string) { - // Do nothing +// When the Kubelet flag is not set, we don't need a kubeletReflector, so we can +// return a struct that does nothing + +type kubeletReflector struct{} + +func newKubeletReflector(_ []string) (kubeletReflector, error) { + return kubeletReflector{}, nil +} + +func (kr *kubeletReflector) addStore(_ cache.Store) error { + return nil +} + +func (kr *kubeletReflector) start(_ context.Context) error { + return nil } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_test.go b/pkg/kubestatemetrics/builder/kubelet_pods_test.go index 94f5f26a798ee..a9020b2143549 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods_test.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods_test.go @@ -9,10 +9,11 @@ package builder import ( "context" + "slices" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/types" @@ -83,83 +84,140 @@ func (m *MockStore) Resync() error { return nil } -func TestUpdateStore_AddPodToStore(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - kubeletPod := &kubelet.Pod{ - Metadata: kubelet.PodMetadata{ - Name: "test-pod", - Namespace: "default", - UID: "12345", +func TestUpdateStores_AddPods(t *testing.T) { + tests := []struct { + name string + reflectorNamespaces []string + addedPodNamespace string + podShouldBeAdded bool + }{ + { + name: "add pod in watched namespace", + reflectorNamespaces: []string{"default"}, + addedPodNamespace: "default", + podShouldBeAdded: true, }, - } - - kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod) - - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) - podWatcher.On("Expire").Return([]string{}, nil) - store.On("Add", kubernetesPod).Return(nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - store.AssertCalled(t, "Add", kubernetesPod) -} - -func TestUpdateStore_FilterPodsByNamespace(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - kubeletPod := &kubelet.Pod{ - Metadata: kubelet.PodMetadata{ - Name: "test-pod", - Namespace: "other-namespace", - UID: "12345", + { + name: "add pod in non-watched namespace", + reflectorNamespaces: []string{"default"}, + addedPodNamespace: "other-namespace", + podShouldBeAdded: false, + }, + { + name: "reflector watches all pods", + reflectorNamespaces: []string{corev1.NamespaceAll}, + addedPodNamespace: "default", + podShouldBeAdded: true, }, } - store.On("Add", mock.Anything).Return(nil) - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) - podWatcher.On("Expire").Return([]string{}, nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - // Add() shouldn't be called because the pod is in a different namespace - store.AssertNotCalled(t, "Add", mock.Anything) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + stores := []*MockStore{ + new(MockStore), + new(MockStore), + } + for _, store := range stores { + store.On("Add", mock.Anything).Return(nil) + } + + watcher := new(MockPodWatcher) + + kubeletPod := &kubelet.Pod{ + Metadata: kubelet.PodMetadata{ + Namespace: test.addedPodNamespace, + Name: "test-pod", + UID: "12345", + }, + } + + kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod) + + watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) + watcher.On("Expire").Return([]string{}, nil) + + reflector := kubeletReflector{ + namespaces: test.reflectorNamespaces, + watchAllNamespaces: slices.Contains(test.reflectorNamespaces, corev1.NamespaceAll), + podWatcher: watcher, + } + + for _, store := range stores { + err := reflector.addStore(store) + require.NoError(t, err) + } + + err := reflector.updateStores(context.TODO()) + require.NoError(t, err) + + if test.podShouldBeAdded { + for _, store := range stores { + store.AssertCalled(t, "Add", kubernetesPod) + } + } else { + for _, store := range stores { + store.AssertNotCalled(t, "Add", mock.Anything) + } + } + }) + } } -func TestUpdateStore_HandleExpiredPods(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - podUID := "kubernetes_pod://pod-12345" - kubernetesPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID("pod-12345"), +func TestUpdateStores_HandleExpired(t *testing.T) { + tests := 
[]struct { + name string + expiredUID string + expectedPodToBeDeleted *corev1.Pod + }{ + { + name: "expired pod", + expiredUID: "kubernetes_pod://pod-12345", + expectedPodToBeDeleted: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("pod-12345"), + }, + }, + }, + { + name: "expired container", + expiredUID: "container-12345", + expectedPodToBeDeleted: nil, }, } - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) - podWatcher.On("Expire").Return([]string{podUID}, nil) - store.On("Delete", &kubernetesPod).Return(nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - store.AssertCalled(t, "Delete", &kubernetesPod) -} - -func TestUpdateStore_HandleExpiredContainers(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) - podWatcher.On("Expire").Return([]string{"container-12345"}, nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - // Delete() shouldn't be called because the expired entity is not a pod - store.AssertNotCalled(t, "Delete", mock.Anything) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + stores := []*MockStore{ + new(MockStore), + new(MockStore), + } + for _, store := range stores { + store.On("Delete", mock.Anything).Return(nil) + } + + watcher := new(MockPodWatcher) + watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) + watcher.On("Expire").Return([]string{test.expiredUID}, nil) + + reflector := kubeletReflector{ + namespaces: []string{"default"}, + podWatcher: watcher, + } + for _, store := range stores { + err := reflector.addStore(store) + require.NoError(t, err) + } + + err := reflector.updateStores(context.TODO()) + require.NoError(t, err) + + for _, store := range stores { + if test.expectedPodToBeDeleted != nil { + store.AssertCalled(t, "Delete", test.expectedPodToBeDeleted) + } else { + store.AssertNotCalled(t, "Delete", mock.Anything) + } + } + }) + } } From 14a3a914a432b8e06c59208852f18d2fdd819e6b Mon Sep 17 00:00:00 2001 From: Mackenzie <63265430+mackjmr@users.noreply.github.com> Date: Tue, 10 Sep 2024 15:17:24 +0200 Subject: [PATCH 106/128] Exclude ancostas and maascamp from OpenTelemetry QA (#29206) --- .ddqa/config.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ddqa/config.toml b/.ddqa/config.toml index 02913ac890130..e24656da4b464 100644 --- a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -78,6 +78,7 @@ jira_issue_type = "Task" jira_statuses = ["To Do", "In Progress", "Done"] github_team = "opentelemetry" github_labels = ["team/opentelemetry"] +exclude_members = ["ancostas", "Maascamp"] [teams."eBPF Platform"] jira_project = "EBPF" From 9a66e670047ed6423e7dde0f54a6646215e203bf Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 10 Sep 2024 16:15:28 +0200 Subject: [PATCH 107/128] bump `github.com/DataDog/agent-payload` to v5.0.132 (#29210) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7970eb5ca4f38..974179601f42e 100644 --- a/go.mod +++ b/go.mod @@ -610,7 +610,7 @@ require ( require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/DataDog/agent-payload/v5 v5.0.130 + github.com/DataDog/agent-payload/v5 v5.0.132 github.com/DataDog/datadog-agent/cmd/agent/common/path v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/api/api/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 
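The kubelet reflector introduced in PATCH 105/128 above reduces to a simple shape: collect every sink up front, then run a single ticker-driven goroutine that pulls once per tick and fans the result out to all registered sinks. A minimal, self-contained Go sketch of that shape — using only the standard library; the reflector, store and pull names below are illustrative stand-ins, not the agent's real types — looks like this:

package main

import (
	"context"
	"fmt"
	"time"
)

// store is a stand-in for the cache.Store interface used in the patch:
// anything that can accept an object.
type store interface {
	Add(obj any) error
}

// printStore just logs what it receives, so the fan-out is visible.
type printStore struct{ name string }

func (p printStore) Add(obj any) error {
	fmt.Printf("%s received %v\n", p.name, obj)
	return nil
}

// reflector mirrors the add-stores-then-start shape of kubeletReflector.
type reflector struct {
	stores  []store
	started bool
}

func (r *reflector) addStore(s store) error {
	if r.started {
		return fmt.Errorf("cannot add store after reflector has started")
	}
	r.stores = append(r.stores, s)
	return nil
}

// start launches one ticker-driven goroutine; each tick pulls once and
// fans the results out to every registered store.
func (r *reflector) start(ctx context.Context, pull func() []any) error {
	if r.started {
		return fmt.Errorf("reflector already started")
	}
	r.started = true
	ticker := time.NewTicker(500 * time.Millisecond)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				for _, obj := range pull() {
					for _, s := range r.stores {
						if err := s.Add(obj); err != nil {
							fmt.Println("add failed:", err)
						}
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	r := &reflector{}
	_ = r.addStore(printStore{name: "pods"})
	_ = r.addStore(printStore{name: "pods_extended"})

	tick := 0
	_ = r.start(ctx, func() []any { tick++; return []any{tick} })

	<-ctx.Done()
}

The started flag here mirrors the guard in the patch above: once the goroutine is running, the store slice is read without a lock, so registration has to be finished before start is called.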
diff --git a/go.sum b/go.sum index b2ad5c0549b99..0ec259f7bbf6d 100644 --- a/go.sum +++ b/go.sum @@ -685,8 +685,8 @@ github.com/CycloneDX/cyclonedx-go v0.8.0 h1:FyWVj6x6hoJrui5uRQdYZcSievw3Z32Z88uY github.com/CycloneDX/cyclonedx-go v0.8.0/go.mod h1:K2bA+324+Og0X84fA8HhN2X066K7Bxz4rpMQ4ZhjtSk= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/DataDog/agent-payload/v5 v5.0.130 h1:pVMRVKkUMmw2vOpmP92TO9jrS0om3K0uKteXHcy/6v0= -github.com/DataDog/agent-payload/v5 v5.0.130/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= +github.com/DataDog/agent-payload/v5 v5.0.132 h1:F9wy+iyAgN2QmkEsOlPp3RrQ4vOb4T6k3BXhjSpELS4= +github.com/DataDog/agent-payload/v5 v5.0.132/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= github.com/DataDog/aptly v1.5.3 h1:oLsRvjuXSVM4ia0N83dU3KiQeiJ6BaszYbTZOkSfDlw= From b0cd32eea2236be3f33490de00cdd88616b027b3 Mon Sep 17 00:00:00 2001 From: Lucas Liseth <36653792+soberpeach@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:49:31 -0400 Subject: [PATCH 108/128] [AMLII-2015] Check agent ready before logging to it (#29159) --- .../linux-log/journald/journald_tailing_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go index 23aaedb2f92bd..14b22696bf37f 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go @@ -82,8 +82,13 @@ func (s *LinuxJournaldFakeintakeSuite) journaldLogCollection() { _, err := s.Env().RemoteHost.Execute("sudo usermod -a -G systemd-journal dd-agent") require.NoErrorf(t, err, "Unable to adjust permissions for dd-agent user: %s", err) - // Restart agent - s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent") + // Restart agent and make sure it's ready before adding logs + _, err = s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent") + assert.NoErrorf(t, err, "Failed to restart the agent: %s", err) + s.EventuallyWithT(func(_ *assert.CollectT) { + agentReady := s.Env().Agent.Client.IsReady() + assert.True(t, agentReady) + }, 1*time.Minute, 5*time.Second, "Agent was not ready") // Generate log appendJournaldLog(s, "hello-world", 1) From d6dc8d33bf52d21b942a73fa05c44c74f0d80812 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Tue, 10 Sep 2024 16:53:04 +0200 Subject: [PATCH 109/128] discovery: Normalize service name (#29151) Co-authored-by: Guy Arbitman --- .../module/impl_linux_test.go | 8 +++-- .../servicediscovery/service_detector.go | 21 ++++++++++-- .../servicediscovery/service_detector_test.go | 32 +++++++++++++++++++ .../tls/nodejs/testdata/package.json | 2 +- 4 files changed, 57 insertions(+), 6 deletions(-) create mode 100644 pkg/collector/corechecks/servicediscovery/service_detector_test.go diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 2b37036a8f9d8..ba93cf883c933 100644 --- 
a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -335,7 +335,7 @@ func TestServiceName(t *testing.T) { cmd := exec.CommandContext(ctx, "sleep", "1000") cmd.Dir = "/tmp/" cmd.Env = append(cmd.Env, "OTHER_ENV=test") - cmd.Env = append(cmd.Env, "DD_SERVICE=foobar") + cmd.Env = append(cmd.Env, "DD_SERVICE=foo😀bar") cmd.Env = append(cmd.Env, "YET_OTHER_ENV=test") err = cmd.Start() require.NoError(t, err) @@ -346,7 +346,8 @@ func TestServiceName(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { portMap := getServicesMap(t, url) assert.Contains(collect, portMap, pid) - assert.Equal(t, "foobar", portMap[pid].DDService) + // Non-ASCII character removed due to normalization. + assert.Equal(t, "foo_bar", portMap[pid].DDService) assert.Equal(t, portMap[pid].DDService, portMap[pid].Name) assert.Equal(t, "sleep", portMap[pid].GeneratedName) assert.False(t, portMap[pid].DDServiceInjected) @@ -644,7 +645,8 @@ func TestNodeDocker(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { svcMap := getServicesMap(t, url) assert.Contains(collect, svcMap, pid) - assert.Equal(collect, "nodejs-https-server", svcMap[pid].GeneratedName) + // test@... changed to test_... due to normalization. + assert.Equal(collect, "test_nodejs-https-server", svcMap[pid].GeneratedName) assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name) assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) assertStat(collect, svcMap[pid]) diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go index a491837c1f7f0..f403ec3bbde4a 100644 --- a/pkg/collector/corechecks/servicediscovery/service_detector.go +++ b/pkg/collector/corechecks/servicediscovery/service_detector.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) // ServiceMetadata stores metadata about a service. @@ -41,11 +42,27 @@ func makeFinalName(meta usm.ServiceMetadata) string { return name } +// fixupMetadata performs additional adjustments on the meta data returned from +// the meta data extraction library. +func fixupMetadata(meta usm.ServiceMetadata, lang language.Language) usm.ServiceMetadata { + meta.Name = makeFinalName(meta) + + langName := "" + if lang != language.Unknown { + langName = string(lang) + } + meta.Name, _ = traceutil.NormalizeService(meta.Name, langName) + if meta.DDService != "" { + meta.DDService, _ = traceutil.NormalizeService(meta.DDService, langName) + } + + return meta +} + // GetServiceName gets the service name based on the command line arguments and // the list of environment variables. 
func GetServiceName(cmdline []string, env map[string]string, root string, lang language.Language, contextMap usm.DetectorContextMap) usm.ServiceMetadata { fs := usm.NewSubDirFS(root) meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, lang, contextMap) - meta.Name = makeFinalName(meta) - return meta + return fixupMetadata(meta, lang) } diff --git a/pkg/collector/corechecks/servicediscovery/service_detector_test.go b/pkg/collector/corechecks/servicediscovery/service_detector_test.go new file mode 100644 index 0000000000000..5f250161237c4 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/service_detector_test.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package servicediscovery + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" +) + +func TestFixup(t *testing.T) { + meta := fixupMetadata(usm.ServiceMetadata{Name: "fOo", DDService: "BAR"}, language.Go) + assert.Equal(t, meta.Name, "foo") + assert.Equal(t, meta.DDService, "bar") + + meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Go) + assert.Equal(t, meta.Name, "unnamed-go-service") + assert.Equal(t, meta.DDService, "") + + meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Unknown) + assert.Equal(t, meta.Name, "unnamed-service") + assert.Equal(t, meta.DDService, "") + + meta = fixupMetadata(usm.ServiceMetadata{Name: "foo", AdditionalNames: []string{"bar", "baz"}}, language.Go) + assert.Equal(t, meta.Name, "foo-bar-baz") +} diff --git a/pkg/network/protocols/tls/nodejs/testdata/package.json b/pkg/network/protocols/tls/nodejs/testdata/package.json index d544e62306e42..18ab31594327b 100644 --- a/pkg/network/protocols/tls/nodejs/testdata/package.json +++ b/pkg/network/protocols/tls/nodejs/testdata/package.json @@ -1,5 +1,5 @@ { - "name": "nodejs-https-server", + "name": "test@nodejs-https-server", "dependencies": { "dd-trace": "^5.21.0" } From 3397d2a7a22ba8b6639e4244d15d8d5c3ea7040a Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 14:54:20 +0000 Subject: [PATCH 110/128] [Backport main] [Backport 7.58.x] Changelog updates for 7.57.0 release (#29153) Co-authored-by: FlorentClarret --- CHANGELOG-DCA.rst | 52 ++++++++++++ CHANGELOG.rst | 203 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 255 insertions(+) diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 7a45c65b38675..786b6903a4c0d 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,58 @@ Release Notes ============= +.. _Release Notes_7.57.0: + +7.57.0 +====== + +.. _Release Notes_7.57.0_Prelude: + +Prelude +------- + +Released on: 2024-09-09 +Pinned to datadog-agent v7.57.0: `CHANGELOG `_. + +.. _Release Notes_7.57.0_New Features: + +New Features +------------ + +- The Cluster Agent now supports activating Continuous Profiling + using Admission Controller. + +- ``LimitRange`` and ``StorageClass`` resources are now collected by the orchestrator check. + + +.. _Release Notes_7.57.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- The auto-instrumentation webhook (beta) uses a new injector library. 
+ + +.. _Release Notes_7.57.0_Bug Fixes: + +Bug Fixes +--------- + +- Fixes a rare bug where some Kubernetes events would be emitted + without a timestamp and would be dropped upstream as a result. + +- Library package versions for auto-instrumentation are now set to the latest major + version of the library-package instead of `latest`. + + * java:v1 + * dotnet:v2 + * python:v2 + * ruby:v2 + * js:v5 + +- Fix APIServer error logs generated when external metrics endpoint is activated + + .. _Release Notes_7.56.2: 7.56.2 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 10ea12b5fa3ac..f0ccf802fe2cd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,209 @@ Release Notes ============= +.. _Release Notes_7.57.0: + +7.57.0 +====== + +.. _Release Notes_7.57.0_Prelude: + +Prelude +------- + +Release on: 2024-09-09 + +- Please refer to the `7.57.0 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.57.0_Upgrade Notes: + +Upgrade Notes +------------- + +- Update cURL to 8.7.1. + +- Update OpenSSL from 3.0.14 to 3.3.1 (on Linux and macOS). + + +.. _Release Notes_7.57.0_New Features: + +New Features +------------ + +- The `agent diagnose` command now includes a ``--json`` option to output the results in JSON format. + +- Add `integration` value for device metadata. + +- APM: In order to allow for automatic instrumentation to work in Kubernetes + clusters that enforce a ``Restricted`` `Pod Security Standard `_, + which require all containers to explicitly set a ``securityContext``, + an option to configure a `securityContext` to be used for all ``initContainers`` + created by the auto instrumentation has been added. + | This can be done through the ``DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT`` + environment value, or ``admission_controller.auto_instrumentation.init_security_context`` configuration - + in both cases a ``json`` string should be supplied. + +- Adds a `kube_runtime_class` tag to metrics associated with Kubernetes + pods and their containers. + +- Expose the Agent's get host tags function to python checks using the new `datadog_agent.get_host_tags` method. + +- Implement static allowlist of Kubernetes events to send by default. + This feature is only enabled when ``filtering_enabled`` is set to + ``true`` in the ``kubernetes_apiserver`` integration configuration. + +- Adds a new launcher to handle incoming logs from integtrations. + +- Add optional reverse DNS enrichment of private IP addresses to NDM NetFlow. + +- On Windows, the default value for the service inference feature is now enabled. + + +.. _Release Notes_7.57.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- Turn on Orchestrator Explorer by default in the core agent + +- Added new source_host tag to TCP/UDP logs to help users understand where their logs came from. + +- Added support to handling UDP/TCP Logs when running the containerized agent. + +- APM: Allow custom HTTP client to be provided when instantiating the + trace-agent configuration. This feature is primarily intended for the + OpenTelemetry exporter. + +- APM: Add default UDS listeners for traces (trace-agent) and + dogstatsd (core-agent) on /var/run/datadog/apm.socket and + /var/run/datadog/dsd.socket, respectively. + These are used in the Single Step APM Instrumentation, improving + the onboarding experience and minimizing the agent configuration. 
+
+- For the [Inferred Service Dependencies beta](https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=java), add two new `peer.hostname` precursor attributes, `out.host` and `dns.hostname`. This will improve coverage of inferred services because some tracer integrations only place the peer hostname in one of those attributes.
+
+- APM stats for internal service overrides are now aggregated by the `_dd.base_service` tag only, enhancing visibility into specific base services.
+
+- Include spans with `span.kind=consumer` for aggregation of
+  stats on peer tags.
+
+- IP address quantization on all peer tags is done by the backend during ingestion. This change updates the Agent to apply the same IP address quantization, which reduces the unnecessary aggregation currently done on raw IP addresses and therefore improves the aggregation performance of stats on peer tags.
+
+- APM: Add a new setting to disable the HTTP receiver in the
+  trace-agent. This setting should almost never be disabled and
+  is only a convenience parameter for OpenTelemetry extensions.
+  Disabling the receiver is semantically equivalent to setting the
+  receiver_port to 0 and receiver_socket to "".
+
+- Agents are now built with Go ``1.22.6``.
+
+- [NDM] Adds the option to collect BGP neighbor metrics from Cisco SD-WAN.
+
+- [NDM] Add the option to collect cloud application metrics from Cisco SD-WAN.
+
+- [Cisco SD-WAN] Allow enabling/disabling metrics collection.
+
+- Report the hostname of Kubernetes events based on the associated
+  pod that the event relates to.
+
+- Introduces a parser to extract tags from integration logs and attach them to outgoing logs.
+
+- Implement External Data environment variable injection in the Admission Controller.
+  The format of this new environment variable is `it-INIT_CONTAINER,cn-CONTAINER_NAME,pu-POD_UID`.
+  This new variable is needed for the New Origin Detection spec. It is used for Origin Detection
+  when Local Data are unavailable, for example with Kata Containers and CGroups v2.
+
+- Upgraded JMXFetch to `0.49.3 `_ which adds support for jsr77 j2ee statistics
+  and custom ConnectionFactory. See `0.49.3 `_ for more details.
+
+- The Windows Agent Installer gives a better error message when a gMSA
+  account is provided for ``ddagentuser`` that Windows does not recognize.
+
+- Uninstalling the Windows Agent MSI Installer removes specific
+  subdirectories of the install path to help prevent data loss when
+  ``PROJECTLOCATION`` is misconfigured to an existing directory.
+
+- Adds a default upper limit of 10000 to the number of network traffic
+  paths that are captured at a single time. The user can increase or
+  decrease this limit as needed.
+
+- Language detection can run on the core Agent without needing a gRPC server.
+
+- Add Hostname and ExtraTags to `CollectorECSTask`.
+
+- Collect SystemInfo for Pods and ECS Tasks.
+
+- Implement an API that allows Python checks to send logs for
+  eventual submission.
+
+- Users can use ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS`` to remove sensitive annotations and labels.
+  For example: ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS="sensitive-key-1 sensitive-key-2"``.
+  Keys should be separated by spaces. The agent removes any annotations and labels matching these keys.
+
+- Add the ability to tag interface metrics with user-defined tags.
+
+
+.. _Release Notes_7.57.0_Security Notes:
+
+Security Notes
+--------------
+
+- Fix CVE-2024-41110.
+
+
+.. _Release Notes_7.57.0_Bug Fixes:
+
+Bug Fixes
+---------
+
+- Results of `agent config` did not reflect the actual runtime config of the other services. Other Datadog Agent services (e.g. trace-agent) running as systemd services now read the same environment variables from the text file `/etc/datadog-agent/environment` as the core Agent process.
+
+- [DBM] Bump go-sqllexer to 0.0.13 to fix a bug where the table name is incorrectly collected on PostgreSQL SELECT ONLY statements.
+
+- [Cisco SD-WAN] Do not collect unspecified IP addresses.
+
+- Fix `container.net.*` metrics accuracy on Linux. Currently `container.net.*` metrics are always emitted with high cardinality tags while the values may not represent actual container-level values but pod-level values (multiple containers in a pod) or host-level values (containers running in host network). With this bug fix, the `container.net.*` metrics aren't emitted for containers running in host network and a single timeseries is emitted by pods when running multiple containers. Finally, in non-Kubernetes environments, if multiple containers share the same network namespace, `container.net.*` metrics won't be emitted.
+
+- Fix duplicate logging in the Process Agent component's Enabled() method.
+
+- Fixed a bug in the kubelet check when running in the core agent that
+  was causing `kubernetes.kubelet.container.log_filesystem.used_bytes`
+  to be reported by the check for excluded/non-existing containers.
+  The metric was being reported in this case without tags.
+  This bug does not exist in the python integration version of the
+  kubelet check.
+
+- Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled.
+
+- Update the pro-bing library to include a fix for a Windows-specific issue with large ICMP packets.
+
+- [oracle] Fix wrong durations for cloud databases.
+
+- Stop chunking outputs in manual checks for container, process, and process_discovery checks to allow the JSON unmarshaler to parse the output.
+
+- Remove the original pod annotation on Consul.
+
+- Fix pod status for pods using native sidecars.
+
+- Fix a regression where the Agent would fail to start on systems with SysVinit.
+
+- APM: Fixes an issue where the number of HTTP decoders was incorrectly set when setting GOMAXPROCS to milli-cpu values.
+
+
+.. _Release Notes_7.57.0_Other Notes:
+
+Other Notes
+-----------
+
+- Add metric origins for the vLLM integration.
+
+- Add deprecation warnings when running process checks on the Process Agent in Linux.
+  This change prepares for the deprecation of processes and container collection in the Process Agent, occurring in a future release.
+
+- Add a metric origin for the AWS Neuron integration.
+
+
 ..
_Release Notes_7.56.2: 7.56.2 From 9743f2837dce1339934c08c8a631270784bd28eb Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Tue, 10 Sep 2024 17:23:32 +0200 Subject: [PATCH 111/128] feat(gitlab): Allow job retry on exit_codes (#29150) --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bd2da974c7098..a813d836841a8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,6 +49,7 @@ include: default: retry: max: 2 + exit_codes: 42 when: - runner_system_failure - stuck_or_timeout_failure From 51e8f68b01ceda5445f52627f9f9fc771d28d342 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 16:40:19 +0000 Subject: [PATCH 112/128] Bump github.com/tinylib/msgp from 1.2.0 to 1.2.1 (#29167) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 974179601f42e..da9bccadf63ab 100644 --- a/go.mod +++ b/go.mod @@ -269,7 +269,7 @@ require ( github.com/streadway/amqp v1.1.0 github.com/stretchr/testify v1.9.0 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - github.com/tinylib/msgp v1.2.0 + github.com/tinylib/msgp v1.2.1 github.com/twmb/murmur3 v1.1.8 github.com/uptrace/bun v1.2.1 github.com/uptrace/bun/dialect/pgdialect v1.2.1 diff --git a/go.sum b/go.sum index 0ec259f7bbf6d..07eac2849178d 100644 --- a/go.sum +++ b/go.sum @@ -2496,8 +2496,8 @@ github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tinylib/msgp v1.2.0 h1:0uKB/662twsVBpYUPbokj4sTSKhWFKB7LopO2kWK8lY= -github.com/tinylib/msgp v1.2.0/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= +github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= From 525833811038bec9045ac0ac8d1390a8ed32b4a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:50:47 +0000 Subject: [PATCH 113/128] Bump google.golang.org/grpc from 1.66.0 to 1.66.1 (#29163) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index da9bccadf63ab..a024f6d9ea5a8 100644 --- a/go.mod +++ b/go.mod @@ -311,7 +311,7 @@ require ( golang.org/x/tools v0.25.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.66.0 + google.golang.org/grpc v1.66.1 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.34.2 gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 diff --git a/go.sum b/go.sum index 07eac2849178d..cc9e6112e673b 100644 --- a/go.sum +++ 
b/go.sum @@ -3620,8 +3620,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= From c041737e28af82273a5aeefee3a77f9552111973 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Tue, 10 Sep 2024 19:50:51 +0200 Subject: [PATCH 114/128] feat(secrets): Rename the wrapper script as secrets might come from vault as well (#29138) --- .gitlab/.pre/cancel-prev-pipelines.yml | 2 +- .gitlab/.pre/test_gitlab_configuration.yml | 4 +- .gitlab/choco_deploy/choco_deploy.yml | 2 +- .../container_publish_job_templates.yml | 2 +- .gitlab/common/shared.yml | 22 +++++----- .gitlab/container_build/docker_linux.yml | 4 +- .gitlab/container_build/fakeintake.yml | 4 +- .gitlab/deploy_packages/winget.yml | 2 +- .gitlab/e2e/e2e.yml | 18 ++++---- .gitlab/e2e_install_packages/common.yml | 6 +-- .gitlab/e2e_k8s/e2e_k8s.yml | 12 +++--- .../functional_test/regression_detector.yml | 10 ++--- .../install_script_testing.yml | 2 +- .gitlab/integration_test/windows.yml | 2 +- .../internal_image_deploy.yml | 8 ++-- .../internal_kubernetes_deploy.yml | 2 +- .../rc_kubernetes_deploy.yml | 2 +- .gitlab/kernel_matrix_testing/common.yml | 26 ++++++------ .../kernel_matrix_testing/security_agent.yml | 2 +- .../kernel_matrix_testing/system_probe.yml | 10 ++--- .gitlab/kitchen_deploy/kitchen_deploy.yml | 8 ++-- .gitlab/maintenance_jobs/docker.yml | 4 +- .gitlab/maintenance_jobs/kitchen.yml | 8 ++-- .gitlab/notify/notify.yml | 22 +++++----- .gitlab/packaging/rpm.yml | 8 ++-- .gitlab/pkg_metrics/pkg_metrics.yml | 2 +- .gitlab/post_rc_build/post_rc_tasks.yml | 2 +- .gitlab/setup/setup.yml | 16 ++++---- .gitlab/source_test/golang_deps_diff.yml | 4 +- .gitlab/source_test/linux.yml | 4 +- .gitlab/trigger_release/trigger_release.yml | 2 +- tasks/linter.py | 41 ++++++++----------- tasks/pipeline.py | 2 +- tasks/unit_tests/linter_tests.py | 19 ++++----- tasks/winbuildscripts/unittests.ps1 | 6 +-- test/kitchen/tasks/clean.sh | 8 ++-- test/kitchen/tasks/run-test-kitchen.sh | 10 ++--- test/kitchen/tasks/show-strays.sh | 8 ++-- tools/ci/docker-login.ps1 | 4 +- ...s_ssm_get_wrapper.ps1 => fetch_secret.ps1} | 0 ...aws_ssm_get_wrapper.sh => fetch_secret.sh} | 0 tools/ci/junit_upload.sh | 4 +- 42 files changed, 158 insertions(+), 166 deletions(-) rename tools/ci/{aws_ssm_get_wrapper.ps1 => fetch_secret.ps1} (100%) rename tools/ci/{aws_ssm_get_wrapper.sh => fetch_secret.sh} (100%) diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml index c743ce4b73df7..488820ac33544 100644 --- a/.gitlab/.pre/cancel-prev-pipelines.yml +++ 
b/.gitlab/.pre/cancel-prev-pipelines.yml @@ -14,5 +14,5 @@ cancel-prev-pipelines: when: never - when: on_success script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - inv pipeline.auto-cancel-previous-pipelines diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml index 85a4dbecfe0cd..1c17aa088a4a6 100644 --- a/.gitlab/.pre/test_gitlab_configuration.yml +++ b/.gitlab/.pre/test_gitlab_configuration.yml @@ -5,7 +5,7 @@ test_gitlab_configuration: rules: - !reference [.on_gitlab_changes] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) - inv -e linter.gitlab-ci - inv -e linter.job-change-path - inv -e linter.gitlab-change-paths @@ -19,7 +19,7 @@ test_gitlab_compare_to: - !reference [.on_gitlab_changes] script: - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) - !reference [.setup_agent_github_app] - pip install -r tasks/requirements.txt - inv pipeline.compare-to-itself diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index f8ace52c393cc..715387a08f961 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -11,7 +11,7 @@ publish_choco_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:CHOCOLATEY_API_KEY" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $chocolateyApiKey=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml index c59618528f045..e87bc2d37860e 100644 --- a/.gitlab/common/container_publish_job_templates.yml +++ b/.gitlab/common/container_publish_job_templates.yml @@ -13,7 +13,7 @@ IMG_VARIABLES: "" IMG_SIGNING: "" script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - | if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then export ECR_RELEASE_SUFFIX="-nightly" diff --git a/.gitlab/common/shared.yml b/.gitlab/common/shared.yml index d85d04434062b..bb1d7e2198518 100644 --- a/.gitlab/common/shared.yml +++ b/.gitlab/common/shared.yml @@ -21,30 +21,30 @@ .setup_deb_signing_key: &setup_deb_signing_key - set +x - - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY) + - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY) - printf -- "${DEB_GPG_KEY}" | gpg --import --batch - - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh 
$DEB_SIGNING_PASSPHRASE) + - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) .setup_macos_github_app: # GitHub App rate-limits are per-app. # This balances the requests made to GitHub between the two apps we have set up. - | if [[ "$(( RANDOM % 2 ))" == "1" ]]; then - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) echo "Using GitHub App instance 1" else - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) echo "Using GitHub App instance 2" fi .setup_agent_github_app: - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_KEY) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_APP_ID) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_INSTALLATION_ID) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_KEY) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP_ID) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_INSTALLATION_ID) - echo "Using agent GitHub App" # Install `dd-pkg` and lint packages produced by Omnibus, supports only deb and rpm packages diff --git a/.gitlab/container_build/docker_linux.yml b/.gitlab/container_build/docker_linux.yml index bf06dc495cc04..3d93f364b430e 100644 --- a/.gitlab/container_build/docker_linux.yml +++ b/.gitlab/container_build/docker_linux.yml @@ -13,8 +13,8 @@ fi - TARGET_TAG=${IMAGE}${ECR_RELEASE_SUFFIX}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}$TAG_SUFFIX-$ARCH # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" # Build image, use target none label to avoid replication - docker buildx build --no-cache --push --pull --platform linux/$ARCH --build-arg CIBUILD=true --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} $BUILD_ARG --build-arg 
DD_GIT_REPOSITORY_URL=https://github.com/DataDog/datadog-agent --build-arg DD_GIT_COMMIT_SHA=${CI_COMMIT_SHA} --file $BUILD_CONTEXT/Dockerfile --tag ${TARGET_TAG} --label "org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label "org.opencontainers.image.authors=Datadog " --label "org.opencontainers.image.source=https://github.com/DataDog/datadog-agent" --label "org.opencontainers.image.version=$(inv agent.version)" --label "org.opencontainers.image.revision=${CI_COMMIT_SHA}" --label "org.opencontainers.image.vendor=Datadog, Inc." --label "target=none" $BUILD_CONTEXT # Squash image diff --git a/.gitlab/container_build/fakeintake.yml b/.gitlab/container_build/fakeintake.yml index da3180eb025b9..92a2f94da565c 100644 --- a/.gitlab/container_build/fakeintake.yml +++ b/.gitlab/container_build/fakeintake.yml @@ -15,7 +15,7 @@ docker_build_fakeintake: BUILD_CONTEXT: . script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" - docker buildx build --push --pull --platform ${PLATFORMS} --file ${DOCKERFILE} --tag ${TARGET} $BUILD_CONTEXT retry: 2 diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index 11e4945731ac6..a35239c948381 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -11,7 +11,7 @@ publish_winget_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:WINGET_PAT" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:WINGET_PAT" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $wingetPat=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 06c4e2234f9a2..cdb56ea050658 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -11,22 +11,22 @@ - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH - - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" # Setup Azure credentials. 
https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config # The app is called `agent-e2e-tests` - - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_ID) - - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_SECRET) - - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_TENANT_ID) - - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) + - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_ID) + - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET) + - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID) + - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) # Setup GCP credentials. https://www.pulumi.com/registry/packages/gcp/installation-configuration/ # The service account is called `agent-e2e-tests` - - export GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_GCP_CREDENTIALS) + - export GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS) # Generate external links to CI VISIBILITY, used by artifacts:reports:annotations - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH variables: @@ -471,7 +471,7 @@ generate-flakes-finder-pipeline: - qa_agent tags: ["arch:amd64"] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) - inv -e testwasher.generate-flake-finder-pipeline artifacts: paths: diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml index 934bb75a67f58..d1457ff8f7e20 100644 --- a/.gitlab/e2e_install_packages/common.yml +++ b/.gitlab/e2e_install_packages/common.yml @@ -33,7 +33,7 @@ - START_MAJOR_VERSION: [5, 6] END_MAJOR_VERSION: [6] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_script_upgrade7: @@ -47,7 +47,7 @@ - START_MAJOR_VERSION: [5, 6, 7] END_MAJOR_VERSION: [7] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY ) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY ) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_rpm: @@ -57,5 +57,5 @@ TEAM: agent-delivery EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} diff --git 
a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml index 98ee5289466cc..606c4e6b1bc9f 100644 --- a/.gitlab/e2e_k8s/e2e_k8s.yml +++ b/.gitlab/e2e_k8s/e2e_k8s.yml @@ -11,16 +11,16 @@ variables: LANG: C.UTF-8 before_script: - - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) - - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_DDDEV) + - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) + - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_DDDEV) .k8s-e2e-cws-cspm-init: - set +x - export DATADOG_AGENT_SITE=datadoghq.com - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_API_KEY) - - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_APP_KEY) - - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_RC_KEY) + - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_API_KEY) + - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_APP_KEY) + - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_RC_KEY) .k8s_e2e_template_needs_dev: extends: .k8s_e2e_template diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index f48e6651b695b..51ef2cc71dcb8 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -42,12 +42,12 @@ single-machine-performance-regression_detector: - echo "Merge base is ${SMP_MERGE_BASE}" # Setup AWS credentials for single-machine-performance AWS account - AWS_NAMED_PROFILE="single-machine-performance" - - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_ACCOUNT_ID) + - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID) - SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com - - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_AGENT_TEAM_ID) - - SMP_API=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_API) - - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_ID) --profile ${AWS_NAMED_PROFILE} - - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY) --profile ${AWS_NAMED_PROFILE} + - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID) + - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API) + - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) --profile ${AWS_NAMED_PROFILE} + - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) --profile ${AWS_NAMED_PROFILE} - aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE} # Download smp binary and prepare it for use - aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml index 7e649bf581d2e..1c24c0ebe9401 100644 --- 
a/.gitlab/install_script_testing/install_script_testing.yml +++ b/.gitlab/install_script_testing/install_script_testing.yml @@ -5,7 +5,7 @@ test_install_script: tags: ["arch:amd64"] script: - set +x - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET - export TESTING_YUM_URL=$RPM_TESTING_S3_BUCKET - export TEST_PIPELINE_ID=$CI_PIPELINE_ID diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index 99cbd9110c6bc..127454f00688f 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -8,7 +8,7 @@ tags: ["runner:windows-docker", "windowsversion:1809"] before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:VCPKG_BLOB_SAS_URL" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:VCPKG_BLOB_SAS_URL" "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $vcpkgBlobSaSUrl=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index ff11b30f0083b..a39917217f5ae 100644 --- a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -22,7 +22,7 @@ docker_trigger_internal: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -68,7 +68,7 @@ docker_trigger_internal-ot: RELEASE_STAGING: "true" script: - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -114,7 +114,7 @@ docker_trigger_cluster_agent_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -160,7 +160,7 @@ docker_trigger_cws_instrumentation_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 
a4b3450af0d3a..3c9f414fae51c 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -36,7 +36,7 @@ internal_kubernetes_deploy_experimental: EXPLICIT_WORKFLOWS: "//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish" BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml index 20855d891344a..179e1b64cbcd5 100644 --- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @@ -22,7 +22,7 @@ rc_kubernetes_deploy: EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc" AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index fa8137de57d90..1dce667e038ab 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -29,7 +29,7 @@ .write_ssh_key_file: - touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE # Without the newline ssh silently fails and moves on to try other auth methods - echo "" >> $AWS_EC2_SSH_KEY_FILE - chmod 600 $AWS_EC2_SSH_KEY_FILE @@ -47,7 +47,7 @@ .kmt_new_profile: - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci .define_if_collect_complexity: @@ -60,7 +60,7 @@ - echo "COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY}" .collect_outcomes_kmt: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - export MICRO_VM_IP=$(jq --exit-status --arg TAG $TAG --arg ARCH $ARCH --arg TEST_SET $TEST_SET -r '.[$ARCH].microvms | map(select(."vmset-tags"| index($TEST_SET))) | map(select(.tag==$TAG)) | .[].ip' $CI_PROJECT_DIR/stack.output) # Collect setup-ddvm systemd service logs - mkdir -p $CI_PROJECT_DIR/logs @@ -114,7 +114,7 @@ scp $DD_AGENT_TESTING_DIR/kmt-dockers-$ARCH.tar.gz metal_instance:/opt/kernel-version-testing fi after_script: - - export 
DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key @@ -143,7 +143,7 @@ KUBERNETES_MEMORY_LIMIT: "16Gi" VMCONFIG_FILE: "${CI_PROJECT_DIR}/vmconfig-${CI_PIPELINE_ID}-${ARCH}.json" before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.retrieve_linux_go_deps] - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] @@ -157,7 +157,7 @@ - jq "." $CI_PROJECT_DIR/stack.output - pulumi logout after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - export AWS_PROFILE=agent-qa-ci - !reference [.shared_filters_and_queries] - mkdir -p $CI_PROJECT_DIR/libvirt/log/$ARCH $CI_PROJECT_DIR/libvirt/xml $CI_PROJECT_DIR/libvirt/qemu $CI_PROJECT_DIR/libvirt/dnsmasq @@ -182,7 +182,7 @@ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] before_script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - !reference [.kmt_new_profile] script: - !reference [.shared_filters_and_queries] @@ -199,7 +199,7 @@ aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] # Manual cleanup jobs, these will be used to cleanup the instances after the tests @@ -228,7 +228,7 @@ RETRY: 2 EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] - echo "CI_JOB_URL=${CI_JOB_URL}" >> $DD_AGENT_TESTING_DIR/job_env.txt @@ -315,9 +315,9 @@ notify_ebpf_complexity_changes: - python3 -m pip install tabulate # Required for printing the tables - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) + export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) script: - inv -e ebpf.generate-complexity-summary-for-pr diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml 
b/.gitlab/kernel_matrix_testing/security_agent.yml index 5f21a58ad63a0..e70c923fcf24d 100644 --- a/.gitlab/kernel_matrix_testing/security_agent.yml +++ b/.gitlab/kernel_matrix_testing/security_agent.yml @@ -72,7 +72,7 @@ kmt_setup_env_secagent_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml index b0d58154e22ac..f01de83cc7116 100644 --- a/.gitlab/kernel_matrix_testing/system_probe.yml +++ b/.gitlab/kernel_matrix_testing/system_probe.yml @@ -28,13 +28,13 @@ upload_dependencies_sysprobe_arm64: stage: kernel_matrix_testing_prepare script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" # Pull base images - mkdir $KMT_DOCKERS - inv -e system-probe.save-test-dockers --use-crane --output-dir $KMT_DOCKERS --arch $ARCH after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] artifacts: expire_in: 1 day @@ -81,7 +81,7 @@ pull_test_dockers_arm64: - !reference [.setup_ssh_config] - scp $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/$ARCHIVE_NAME metal_instance:/opt/kernel-version-testing/ after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: DEPENDENCIES: $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/btfs @@ -160,7 +160,7 @@ kmt_setup_env_sysprobe_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 774f459e29921..1fd79c1796e7d 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -3,13 +3,13 @@ # Contains jobs which deploy Agent package to testing repsoitories that are used in kitchen tests. 
.setup_rpm_signing_key: &setup_rpm_signing_key - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) .setup_apt_signing_key: &setup_apt_signing_key - - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY) - - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE) + - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY) + - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) - printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml index 899f74156feac..67a169f4dce8b 100644 --- a/.gitlab/maintenance_jobs/docker.yml +++ b/.gitlab/maintenance_jobs/docker.yml @@ -60,8 +60,8 @@ delete_docker_tag: TAG: "" # tag name, for example "6.9.0" ORGANIZATION: "datadog" before_script: - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN) - - PASS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD) + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) + - PASS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD) - python3 -m pip install -r requirements.txt - | export DOCKER_TOKEN=`curl -s -H "Content-Type: application/json" -X POST -d '{"username": "'$DOCKER_REGISTRY_LOGIN'", "password": "'$PASS'"}' https://hub.docker.com/v2/users/login/ | python -c 'import sys, json; print(json.load(sys.stdin)["token"].strip())'` diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml index 69021c398dff2..56cd45ef1fa9c 100644 --- a/.gitlab/maintenance_jobs/kitchen.yml +++ b/.gitlab/maintenance_jobs/kitchen.yml @@ -26,10 +26,10 @@ periodic_kitchen_cleanup_azure: # the job to be run one at a time. 
resource_group: azure_cleanup script: - - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` - - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID` - - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET` - - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID` + - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` + - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID` + - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET` + - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID` # Remove kitchen resources for all existing test suite prefixes - RESOURCE_GROUP_PREFIX=kitchen-chef python3 /deploy_scripts/cleanup_azure.py - RESOURCE_GROUP_PREFIX=kitchen-win python3 /deploy_scripts/cleanup_azure.py diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index 48831192e5307..1f6f08ba9c6a3 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -25,8 +25,8 @@ notify: resource_group: notification timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt - | # Do not send notifications if this is a child pipeline of another repo @@ -53,8 +53,8 @@ send_pipeline_stats: when: always dependencies: [] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - invoke -e notify.send-stats notify_github: @@ -110,10 +110,10 @@ notify_gitlab_ci_changes: - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN) + export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) + export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) + export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) + export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) - inv -e notify.gitlab-ci-diff --pr-comment .failure_summary_job: @@ -125,9 +125,9 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job 
blocking the resource_group defined above .failure_summary_setup: - - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN) - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt # Upload failure summary data to S3 at the end of each main pipeline diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index 1e710de458837..d03aa99f9212f 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -8,9 +8,9 @@ script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --target-project=${DD_PROJECT} ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -137,9 +137,9 @@ installer_suse_rpm-arm64: script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY) + - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE) + - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) - inv -e omnibus.build --release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --flavor=iot ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 38e6ce3b23c70..94a48c2fe1004 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -57,7 +57,7 @@ send_pkg_size: optional: true script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) # Allow failures: some packages are not always built, and therefore stats cannot be sent for them - set +e diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml index f39c490c1f9bb..8cfab2abbd124 100644 --- a/.gitlab/post_rc_build/post_rc_tasks.yml +++ b/.gitlab/post_rc_build/post_rc_tasks.yml @@ -11,7 +11,7 @@ update_rc_build_links: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - export 
ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN) + - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $JIRA_READ_API_TOKEN) - export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com - python3 -m pip install -r tasks/requirements_release_tasks.txt - PATCH=$(echo "$CI_COMMIT_REF_NAME" | cut -d'.' -f3 | cut -c1) diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml index f70fefb111181..7649437386917 100644 --- a/.gitlab/setup/setup.yml +++ b/.gitlab/setup/setup.yml @@ -18,15 +18,15 @@ github_rate_limit_info: script: - python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client # Send stats for app 1 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 1 # Send stats for app 2 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) + - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2) + - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 2 allow_failure: true diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml index 6b129d845ccf8..491a99cd520f2 100644 --- a/.gitlab/source_test/golang_deps_diff.yml +++ b/.gitlab/source_test/golang_deps_diff.yml @@ -15,7 +15,7 @@ golang_deps_diff: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - inv -e diff.go-deps --report-file=deps-report.md --report-metrics --git-ref "${CI_COMMIT_REF_NAME}" artifacts: paths: @@ -64,7 +64,7 @@ golang_deps_send_count_metrics: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2) + - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) - inv -e go-deps.send-count-metrics --git-sha "${CI_COMMIT_SHA}" --git-ref "${CI_COMMIT_REF_NAME}" golang_deps_test: diff --git a/.gitlab/source_test/linux.yml 
b/.gitlab/source_test/linux.yml index a83becefe59bb..4b49e23974b35 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -51,7 +51,7 @@ .upload_coverage: # Upload coverage files to Codecov. Never fail on coverage upload. - source /root/.bashrc - - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $CODECOV_TOKEN) + - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true .linux_lint: @@ -267,7 +267,7 @@ new-e2e-unit-tests: before_script: # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config - export AWS_PROFILE=agent-qa-ci # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index 3446bbf960627..cf2a4b3591a84 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -19,7 +19,7 @@ # agent-release-management creates pipeline for both Agent 6 and Agent 7 # when triggered with major version 7 - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1 - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN) + - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) - 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main" --variable ACTION --variable AUTO_RELEASE diff --git a/tasks/linter.py b/tasks/linter.py index 2136ae60f25d0..1d32a7e38ea43 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -349,11 +349,11 @@ def ssm_parameters(ctx, mode="all", folders=None): for filename in error_files: print(f" - {filename}") raise Exit(code=1) - print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for aws ssm parameters.") + print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for secret parameters.") class SSMParameterCall: - def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standard=True): + def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False): """ Initialize an SSMParameterCall instance. @@ -362,18 +362,16 @@ def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standa line_nb (int): The line number in the file where the SSM parameter call is located. with_wrapper (bool, optional): If the call is using the wrapper. Defaults to False. with_env_var (bool, optional): If the call is using an environment variable defined in .gitlab-ci.yml. Defaults to False. - not_standard (bool, optional): If the call is standard (matching either "aws ssm get-parameter --name" or "aws_ssm_get_wrapper"). Defaults to True. """ self.file = file self.line_nb = line_nb self.with_wrapper = with_wrapper self.with_env_var = with_env_var - self.standard = standard def __str__(self): message = "" - if not self.with_wrapper or not self.standard: - message += "Please use the dedicated `aws_ssm_get_wrapper.(sh|ps1)`." + if not self.with_wrapper: + message += "Please use the dedicated `fetch_secret.(sh|ps1)`." if not self.with_env_var: message += " Save your parameter name as environment variable in .gitlab-ci.yml file." return f"{self.file}:{self.line_nb + 1}. 
{message}" @@ -383,29 +381,24 @@ def __repr__(self): def list_get_parameter_calls(file): - ssm_get = re.compile(r"^.+ssm.get.+$") aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P[^ ]+).*$") - # remove the 'a' of 'aws' because '\a' is badly interpreted for windows paths - ssm_wrapper_call = re.compile(r"^.+ws_ssm_get_wrapper.(sh|ps1)[\"]? +(?P[^ )]+).*$") + # remove the first letter of the script name because '\f' is badly interpreted for windows paths + wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? +(?P[^ )]+).*$") calls = [] with open(file) as f: try: for nb, line in enumerate(f): - is_ssm_get = ssm_get.match(line.strip()) - if is_ssm_get: - m = aws_ssm_call.match(line.strip()) - if m: - # Remove possible quotes - param = m["param"].replace('"', '').replace("'", "") - calls.append( - SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param)) - ) - m = ssm_wrapper_call.match(line.strip()) - param = m["param"].replace('"', '').replace("'", "") if m else None - if m and not (param.startswith("$") or "os.environ" in param): - calls.append(SSMParameterCall(file, nb, with_wrapper=True)) - if not m: - calls.append(SSMParameterCall(file, nb, standard=False)) + m = aws_ssm_call.match(line.strip()) + if m: + # Remove possible quotes + param = m["param"].replace('"', '').replace("'", "") + calls.append( + SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param)) + ) + m = wrapper_call.match(line.strip()) + param = m["param"].replace('"', '').replace("'", "") if m else None + if m and not (param.startswith("$") or "os.environ" in param): + calls.append(SSMParameterCall(file, nb, with_wrapper=True)) except UnicodeDecodeError: pass return calls diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 67300a91d3507..1d300a7f38ee2 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -529,7 +529,7 @@ def changelog(ctx, new_commit_sha): else: parent_dir = os.getcwd() old_commit_sha = ctx.run( - f"{parent_dir}/tools/ci/aws_ssm_get_wrapper.sh {os.environ['CHANGELOG_COMMIT_SHA']}", + f"{parent_dir}/tools/ci/fetch_secret.sh {os.environ['CHANGELOG_COMMIT_SHA']}", hide=True, ).stdout.strip() if not new_commit_sha: diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py index 5af0e1ffdd64f..b6a2cd3a5f283 100644 --- a/tasks/unit_tests/linter_tests.py +++ b/tasks/unit_tests/linter_tests.py @@ -41,7 +41,7 @@ def test_without_wrapper_with_env(self): def test_with_wrapper_no_env(self): with open(self.test_file, "w") as f: f.write( - "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh test.datadog-agent.datadog_api_key_org2)" + "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog-agent.datadog_api_key_org2)" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertTrue(matched.with_wrapper) @@ -49,25 +49,24 @@ def test_with_wrapper_no_env(self): def test_with_wrapper_with_env(self): with open(self.test_file, "w") as f: - f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $APP_KEY_ORG2)") + f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2)") matched = linter.list_get_parameter_calls(self.test_file) self.assertListEqual([], matched) def test_multi_match_windows(self): with open(self.test_file, "w") as f: f.write( - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get_wrapper.ps1" test.datadog-agent.datadog_api_key_org2 $tmpfile)\n' - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get 
wrapper.ps1" "$Env:MISSING_UNDERSCORE" $tmpfile)\n' - '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "bad.name" "$tmpfile")\n' - 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "$Env:TEST" $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch_secret.ps1" test.datadog-agent.datadog_api_key_org2 $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch secret.ps1" "$Env:MISSING_UNDERSCORE" $tmpfile)\n' + '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "bad.name" "$tmpfile")\n' + 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "$Env:TEST" $tmpfile)\n' ) matched = linter.list_get_parameter_calls(self.test_file) - self.assertEqual(3, len(matched)) + self.assertEqual(2, len(matched)) self.assertTrue(matched[0].with_wrapper) self.assertFalse(matched[0].with_env_var) - self.assertFalse(matched[1].standard) - self.assertTrue(matched[2].with_wrapper) - self.assertFalse(matched[2].with_env_var) + self.assertTrue(matched[1].with_wrapper) + self.assertFalse(matched[1].with_env_var) class TestGitlabChangePaths(unittest.TestCase): diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index c0faecf550fa8..ad28ec0540ad6 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -63,7 +63,7 @@ $ErrorActionPreference = "Continue" $tmpfile = [System.IO.Path]::GetTempFileName() # 1. Upload coverage reports to Codecov -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CODECOV_TOKEN" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:CODECOV_TOKEN" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } @@ -75,12 +75,12 @@ $Env:CODECOV_TOKEN=$(cat "$tmpfile") Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object { Copy-Item -Path $_.FullName -Destination C:\mnt } -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:API_KEY_ORG2" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:API_KEY_ORG2" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } $Env:DATADOG_API_KEY=$(cat "$tmpfile") -& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:GITLAB_TOKEN" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:GITLAB_TOKEN" "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } diff --git a/test/kitchen/tasks/clean.sh b/test/kitchen/tasks/clean.sh index f407d31cf2a1d..c351060cc08ca 100755 --- a/test/kitchen/tasks/clean.sh +++ b/test/kitchen/tasks/clean.sh @@ -8,19 +8,19 @@ set -euo pipefail # These should not be printed out if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) + 
AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/test/kitchen/tasks/run-test-kitchen.sh b/test/kitchen/tasks/run-test-kitchen.sh index a1b15fe997730..6e51d4013da71 100755 --- a/test/kitchen/tasks/run-test-kitchen.sh +++ b/test/kitchen/tasks/run-test-kitchen.sh @@ -54,25 +54,25 @@ if [ "$KITCHEN_PROVIDER" == "azure" ]; then # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) + AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) # make sure whitespace is removed AZURE_CLIENT_ID="$(echo -e "${AZURE_CLIENT_ID}" | tr -d '[:space:]')" export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) + AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) # make sure whitespace is removed AZURE_CLIENT_SECRET="$(echo -e "${AZURE_CLIENT_SECRET}" | tr -d '[:space:]')" export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) + AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) # make sure whitespace is removed AZURE_TENANT_ID="$(echo -e "${AZURE_TENANT_ID}" | tr -d '[:space:]')" export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) + AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) # make sure whitespace is removed AZURE_SUBSCRIPTION_ID="$(echo -e "${AZURE_SUBSCRIPTION_ID}" | tr -d '[:space:]')" export AZURE_SUBSCRIPTION_ID @@ -101,7 +101,7 @@ elif [ "$KITCHEN_PROVIDER" == "ec2" ]; then export KITCHEN_EC2_SSH_KEY_ID="datadog-agent-kitchen" export KITCHEN_EC2_SSH_KEY_PATH="$(pwd)/aws-ssh-key" touch $KITCHEN_EC2_SSH_KEY_PATH && chmod 600 $KITCHEN_EC2_SSH_KEY_PATH - $PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_EC2_SSH_KEY > $KITCHEN_EC2_SSH_KEY_PATH + $PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_EC2_SSH_KEY > $KITCHEN_EC2_SSH_KEY_PATH fi fi diff --git a/test/kitchen/tasks/show-strays.sh b/test/kitchen/tasks/show-strays.sh index d5ea5a3315ef6..996c95f6ac04b 100755 --- a/test/kitchen/tasks/show-strays.sh +++ b/test/kitchen/tasks/show-strays.sh @@ -10,19 +10,19 @@ set -euo pipefail # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) + 
AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index c74dbf79900b4..e85da22733afa 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -7,12 +7,12 @@ If ($lastExitCode -ne "0") { } # DockerHub login $tmpfile = [System.IO.Path]::GetTempFileName() -& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN" "$tmpfile" +& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_LOGIN" "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } $DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") -& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD" "$tmpfile" +& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_PWD" "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/tools/ci/aws_ssm_get_wrapper.ps1 b/tools/ci/fetch_secret.ps1 similarity index 100% rename from tools/ci/aws_ssm_get_wrapper.ps1 rename to tools/ci/fetch_secret.ps1 diff --git a/tools/ci/aws_ssm_get_wrapper.sh b/tools/ci/fetch_secret.sh similarity index 100% rename from tools/ci/aws_ssm_get_wrapper.sh rename to tools/ci/fetch_secret.sh diff --git a/tools/ci/junit_upload.sh b/tools/ci/junit_upload.sh index 3b2ea12aeb6db..e4ab90ee9b70a 100755 --- a/tools/ci/junit_upload.sh +++ b/tools/ci/junit_upload.sh @@ -6,8 +6,8 @@ if [[ -n "$1" ]]; then junit_files="$1" fi -GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$GITLAB_READ_API_TOKEN")" -DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$API_KEY_ORG2")" +GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$GITLAB_READ_API_TOKEN")" +DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$API_KEY_ORG2")" export DATADOG_API_KEY export GITLAB_TOKEN error=0 From 6a0da081a64ed52689072b613a8fe103fe2d73ad Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Tue, 10 Sep 2024 20:50:56 +0300 Subject: [PATCH 115/128] [USM] Add symbol extraction using pclntab (#29127) --- .../corechecks/servicediscovery/apm/detect.go | 12 +- .../servicediscovery/apm/detect_nix_test.go | 25 +- .../apm/testutil/instrumented/instrumented.go | 3 + pkg/network/go/bininspect/pclntab.go | 307 ++++++++++++++++++ pkg/network/go/bininspect/symbols.go | 16 + .../usm/testutil/generic_testutil_builder.go | 24 +- 6 files changed, 373 insertions(+), 14 deletions(-) create mode 100644 pkg/network/go/bininspect/pclntab.go diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go index 8a5419676d0f3..289e6fc42ee96 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go @@ -118,12 +118,16 @@ func goDetector(pid int, _ []string, _ map[string]string, _ usm.DetectorContextM } defer elfFile.Close() - _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength) - if err != nil { - return None + if _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil { + return Provided } - return Provided + // We failed to find symbols in the regular symbols section, now we can try the pclntab + if _, err = bininspect.GetAnySymbolWithPrefixPCLNTAB(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil { + return Provided + } + return None + } func 
pythonDetectorFromMapsReader(reader io.Reader) Instrumentation { diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go index f2977cd14f998..b2e551d63fb5a 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go @@ -176,18 +176,29 @@ func Test_pythonDetector(t *testing.T) { func TestGoDetector(t *testing.T) { curDir, err := testutil.CurDir() require.NoError(t, err) - serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented") + serverBinWithSymbols, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented") + require.NoError(t, err) + serverBinWithoutSymbols, err := usmtestutil.BuildGoBinaryWrapperWithoutSymbols(filepath.Join(curDir, "testutil"), "instrumented") require.NoError(t, err) - cmd := exec.Command(serverBin) - require.NoError(t, cmd.Start()) + cmdWithSymbols := exec.Command(serverBinWithSymbols) + require.NoError(t, cmdWithSymbols.Start()) t.Cleanup(func() { - _ = cmd.Process.Kill() + _ = cmdWithSymbols.Process.Kill() + }) + + cmdWithoutSymbols := exec.Command(serverBinWithoutSymbols) + require.NoError(t, cmdWithoutSymbols.Start()) + t.Cleanup(func() { + _ = cmdWithoutSymbols.Process.Kill() }) result := goDetector(os.Getpid(), nil, nil, nil) - require.Equal(t, result, None) + require.Equal(t, None, result) + + result = goDetector(cmdWithSymbols.Process.Pid, nil, nil, nil) + require.Equal(t, Provided, result) - result = goDetector(cmd.Process.Pid, nil, nil, nil) - require.Equal(t, result, Provided) + result = goDetector(cmdWithoutSymbols.Process.Pid, nil, nil, nil) + require.Equal(t, Provided, result) } diff --git a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go index ab82025a4488a..74b92599e66cb 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go +++ b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go @@ -8,9 +8,12 @@ package main import ( + "time" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) func main() { tracer.Start() + time.Sleep(time.Second * 20) } diff --git a/pkg/network/go/bininspect/pclntab.go b/pkg/network/go/bininspect/pclntab.go new file mode 100644 index 0000000000000..be68da2ad03bb --- /dev/null +++ b/pkg/network/go/bininspect/pclntab.go @@ -0,0 +1,307 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package bininspect + +import ( + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + pclntabSectionName = ".gopclntab" + + go116magic = 0xfffffffa + go118magic = 0xfffffff0 + go120magic = 0xfffffff1 +) + +// version of the pclntab +type version int + +const ( + verUnknown version = iota + ver11 + ver12 + ver116 + ver118 + ver120 +) + +var ( + // ErrMissingPCLNTABSection is returned when the pclntab section is missing. + ErrMissingPCLNTABSection = errors.New("failed to find pclntab section") + + // ErrUnsupportedPCLNTABVersion is returned when the pclntab version is not supported. 
+ ErrUnsupportedPCLNTABVersion = errors.New("unsupported pclntab version") + + // ErrFailedToFindAllSymbols is returned when not all symbols were found. + ErrFailedToFindAllSymbols = errors.New("failed to find all symbols") +) + +// sectionAccess is a wrapper around elf.Section to provide ReadAt functionality. +// This is used to lazy read from the pclntab section, as the pclntab is large and we don't want to read it all at once, +// or store it in memory. +type sectionAccess struct { + section *elf.Section + baseOffset int64 +} + +// ReadAt reads len(p) bytes from the section starting at the given offset. +func (s *sectionAccess) ReadAt(outBuffer []byte, offset int64) (int, error) { + return s.section.ReadAt(outBuffer, s.baseOffset+offset) +} + +// pclntanSymbolParser is a parser for pclntab symbols. +// Similar to LineTable struct in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L43 +type pclntanSymbolParser struct { + // section is the pclntab section. + section *elf.Section + // symbolFilter is the filter for the symbols. + symbolFilter symbolFilter + + // byteOrderParser is the binary.ByteOrder for the pclntab. + byteOrderParser binary.ByteOrder + // cachedVersion is the version of the pclntab. + cachedVersion version + // funcNameTable is the sectionAccess for the function name table. + funcNameTable sectionAccess + // funcData is the sectionAccess for the function data. + funcData sectionAccess + // funcTable is the sectionAccess for the function table. + funcTable sectionAccess + // funcTableSize is the size of the function table. + funcTableSize uint32 + // ptrSize is the size of a pointer in the architecture of the binary. + ptrSize uint32 + // ptrBufferSizeHelper is a buffer for reading pointers of the size ptrSize. + ptrBufferSizeHelper []byte + // funcNameHelper is a buffer for reading function names. Of the maximum size of the symbol names. + funcNameHelper []byte + // funcTableFieldSize is the size of a field in the function table. + funcTableFieldSize int + // funcTableBuffer is a buffer for reading fields in the function table. + funcTableBuffer []byte +} + +// GetPCLNTABSymbolParser returns the matching symbols from the pclntab section. +func GetPCLNTABSymbolParser(f *elf.File, symbolFilter symbolFilter) (map[string]*elf.Symbol, error) { + section := f.Section(pclntabSectionName) + if section == nil { + return nil, ErrMissingPCLNTABSection + } + + parser := &pclntanSymbolParser{section: section, symbolFilter: symbolFilter} + + if err := parser.parsePclntab(); err != nil { + return nil, err + } + // Late initialization, to prevent allocation if the binary is not supported. + _, maxSymbolsSize := symbolFilter.getMinMaxLength() + parser.funcNameHelper = make([]byte, maxSymbolsSize) + parser.funcTableFieldSize = getFuncTableFieldSize(parser.cachedVersion, int(parser.ptrSize)) + // Allocate the buffer for reading the function table. + // TODO: Do we need 2*funcTableFieldSize? + parser.funcTableBuffer = make([]byte, 2*parser.funcTableFieldSize) + return parser.getSymbols() +} + +// parsePclntab parses the pclntab, setting the version and verifying the header. 
+// Based on parsePclnTab in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L194 +func (p *pclntanSymbolParser) parsePclntab() error { + p.cachedVersion = ver11 + + pclntabHeader := make([]byte, 8) + if n, err := p.section.ReadAt(pclntabHeader, 0); err != nil || n != len(pclntabHeader) { + return fmt.Errorf("failed to read pclntab header: %w", err) + } + // Matching the condition https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L216-L220 + // Check header: 4-byte magic, two zeros, pc quantum, pointer size. + if p.section.Size < 16 || pclntabHeader[4] != 0 || pclntabHeader[5] != 0 || + (pclntabHeader[6] != 1 && pclntabHeader[6] != 2 && pclntabHeader[6] != 4) || // pc quantum + (pclntabHeader[7] != 4 && pclntabHeader[7] != 8) { // pointer size + // TODO: add explicit error message + return errors.New("invalid pclntab header") + } + + leMagic := binary.LittleEndian.Uint32(pclntabHeader) + beMagic := binary.BigEndian.Uint32(pclntabHeader) + switch { + case leMagic == go116magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver116 + case beMagic == go116magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver116 + case leMagic == go118magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver118 + case beMagic == go118magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver118 + case leMagic == go120magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver120 + case beMagic == go120magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver120 + default: + return ErrUnsupportedPCLNTABVersion + } + + p.ptrSize = uint32(pclntabHeader[7]) + p.ptrBufferSizeHelper = make([]byte, p.ptrSize) + + // offset is based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L252 + offset := func(word uint32) uint64 { + off := 8 + word*p.ptrSize + if n, err := p.section.ReadAt(p.ptrBufferSizeHelper, int64(off)); err != nil || n != int(p.ptrSize) { + return 0 + } + return p.uintptr(p.ptrBufferSizeHelper) + } + + switch p.cachedVersion { + case ver118, ver120: + p.funcTableSize = uint32(offset(0)) + p.funcNameTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(3)), + } + p.funcData = sectionAccess{ + section: p.section, + baseOffset: int64(offset(7)), + } + p.funcTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(7)), + } + case ver116: + p.funcTableSize = uint32(offset(0)) + p.funcNameTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(2)), + } + p.funcData = sectionAccess{ + section: p.section, + baseOffset: int64(offset(6)), + } + p.funcTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(6)), + } + } + + return nil +} + +// uintptr returns the pointer-sized value encoded at b. +// The pointer size is dictated by the table being read. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L186. +func (p *pclntanSymbolParser) uintptr(b []byte) uint64 { + if p.ptrSize == 4 { + return uint64(p.byteOrderParser.Uint32(b)) + } + return p.byteOrderParser.Uint64(b) +} + +// getFuncTableFieldSize returns the size of a field in the function table. 
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L388-L392 +func getFuncTableFieldSize(version version, ptrSize int) int { + if version >= ver118 { + return 4 + } + return ptrSize +} + +// getSymbols returns the symbols from the pclntab section that match the symbol filter. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L300-L329 +func (p *pclntanSymbolParser) getSymbols() (map[string]*elf.Symbol, error) { + numWanted := p.symbolFilter.getNumWanted() + symbols := make(map[string]*elf.Symbol, numWanted) + data := sectionAccess{section: p.section} + for currentIdx := uint32(0); currentIdx < p.funcTableSize; currentIdx++ { + // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L315 + _, err := p.funcTable.ReadAt(p.funcTableBuffer, int64((2*currentIdx+1)*uint32(p.funcTableFieldSize))) + if err != nil { + continue + } + + // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L321 + data.baseOffset = int64(p.uint(p.funcTableBuffer)) + p.funcData.baseOffset + funcName := p.funcName(data) + + if funcName == "" { + continue + } + symbols[funcName] = &elf.Symbol{ + Name: funcName, + } + if len(symbols) == numWanted { + break + } + } + if len(symbols) < numWanted { + return symbols, ErrFailedToFindAllSymbols + } + return symbols, nil +} + +// funcName returns the name of the function found at off. +func (p *pclntanSymbolParser) funcName(data sectionAccess) string { + off := funcNameOffset(p.ptrSize, p.cachedVersion, p.byteOrderParser, data, p.ptrBufferSizeHelper) + n, err := p.funcNameTable.ReadAt(p.funcNameHelper, int64(off)) + if n == 0 || (err != nil && !errors.Is(err, io.EOF)) { + return "" + } + idxToNull := bytes.IndexByte(p.funcNameHelper, 0) + if idxToNull == -1 || idxToNull == 0 || idxToNull >= n { + return "" + } + + if p.symbolFilter.want(string(p.funcNameHelper[:idxToNull])) { + return string(p.funcNameHelper[:idxToNull]) + } + return "" +} + +// uint returns the uint stored at b. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L427-L432 +func (p *pclntanSymbolParser) uint(b []byte) uint64 { + if p.funcTableFieldSize == 4 { + return uint64(p.byteOrderParser.Uint32(b)) + } + return p.byteOrderParser.Uint64(b) +} + +// funcNameOffset returns the offset of the function name. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L472-L485 +// We can only for the usage of this function for getting the name of the function (https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L463) +// So we explicitly set `n = 1` in the original implementation. +func funcNameOffset(ptrSize uint32, version version, binary binary.ByteOrder, data sectionAccess, helper []byte) uint32 { + // In Go 1.18, the struct _func has changed. The original (prior to 1.18) was: + // type _func struct { + // entry uintptr + // nameoff int32 + // ... + // } + // In Go 1.18, the struct is: + // type _func struct { + // entryoff uint32 + // nameoff int32 + // ... + // } + // Thus, to read the nameoff, for Go 1.18 and later, we need to skip the entryoff field (4 bytes). + // for Go 1.17 and earlier, We need to skip the sizeof(uintptr) which is ptrSize. 
+ off := ptrSize + if version >= ver118 { + off = 4 + } + // We read only 4 bytes, as the nameoff is an int32. + if n, err := data.ReadAt(helper[:4], int64(off)); err != nil || n != 4 { + return 0 + } + return binary.Uint32(helper[:4]) +} diff --git a/pkg/network/go/bininspect/symbols.go b/pkg/network/go/bininspect/symbols.go index 90dd50c6932d1..910bc37d3ff30 100644 --- a/pkg/network/go/bininspect/symbols.go +++ b/pkg/network/go/bininspect/symbols.go @@ -281,3 +281,19 @@ func GetAnySymbolWithPrefix(elfFile *elf.File, prefix string, maxLength int) (*e // Shouldn't happen return nil, errors.New("empty symbols map") } + +// GetAnySymbolWithPrefixPCLNTAB returns any one symbol with the given prefix and the +// specified maximum length from the pclntab section in ELF file. +func GetAnySymbolWithPrefixPCLNTAB(elfFile *elf.File, prefix string, maxLength int) (*elf.Symbol, error) { + symbols, err := GetPCLNTABSymbolParser(elfFile, newPrefixSymbolFilter(prefix, maxLength)) + if err != nil { + return nil, err + } + + for key := range symbols { + return symbols[key], nil + } + + // Shouldn't happen + return nil, errors.New("empty symbols map") +} diff --git a/pkg/network/usm/testutil/generic_testutil_builder.go b/pkg/network/usm/testutil/generic_testutil_builder.go index af5cacfcfcdb8..623aeccb284e5 100644 --- a/pkg/network/usm/testutil/generic_testutil_builder.go +++ b/pkg/network/usm/testutil/generic_testutil_builder.go @@ -13,10 +13,14 @@ import ( "path" ) -// BuildGoBinaryWrapper builds a Go binary and returns the path to it. +const ( + baseLDFlags = "-ldflags=-extldflags '-static'" +) + +// buildGoBinary builds a Go binary and returns the path to it. // If the binary is already built (meanly in the CI), it returns the // path to the binary. -func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { +func buildGoBinary(curDir, binaryDir, buildFlags string) (string, error) { serverSrcDir := path.Join(curDir, binaryDir) cachedServerBinaryPath := path.Join(serverSrcDir, binaryDir) @@ -26,7 +30,7 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { return cachedServerBinaryPath, nil } - c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test", "-ldflags=-extldflags '-static'", "-o", cachedServerBinaryPath, serverSrcDir) + c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test,netgo", buildFlags, "-o", cachedServerBinaryPath, serverSrcDir) out, err := c.CombinedOutput() if err != nil { return "", fmt.Errorf("could not build unix transparent proxy server test binary: %s\noutput: %s", err, string(out)) @@ -34,3 +38,17 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { return cachedServerBinaryPath, nil } + +// BuildGoBinaryWrapper builds a Go binary and returns the path to it. +// If the binary is already built (meanly in the CI), it returns the +// path to the binary. +func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { + return buildGoBinary(curDir, binaryDir, baseLDFlags) +} + +// BuildGoBinaryWrapperWithoutSymbols builds a Go binary without symbols and returns the path to it. +// If the binary is already built (meanly in the CI), it returns the +// path to the binary. 
+func BuildGoBinaryWrapperWithoutSymbols(curDir, binaryDir string) (string, error) { + return buildGoBinary(curDir, binaryDir, baseLDFlags+" -s -w") +} From fd5486a8c4f445f40470d57f193db90f850bb93e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Tue, 10 Sep 2024 20:33:10 +0200 Subject: [PATCH 116/128] [EBPF] Replace generated pointers to structs with uintptr (#29221) --- pkg/ebpf/cgo/genpost.go | 4 ++++ pkg/network/ebpf/kprobe_types_linux.go | 8 ++++---- tasks/system_probe.py | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 62863734b35f2..512d0542c62d9 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -44,6 +44,10 @@ func main() { convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) + // Convert generated pointers to CGo structs to uintptr + convertPointerToUint64Regex := regexp.MustCompile(`\*_Ctype_struct_(\w+)`) + b = convertPointerToUint64Regex.ReplaceAll(b, []byte("uintptr")) + b, err = format.Source(b) if err != nil { log.Fatal(err) diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go index 63dd4b4c6e393..7916d343dcd10 100644 --- a/pkg/network/ebpf/kprobe_types_linux.go +++ b/pkg/network/ebpf/kprobe_types_linux.go @@ -77,12 +77,12 @@ type PIDFD struct { Fd uint32 } type UDPRecvSock struct { - Sk *_Ctype_struct_sock - Msg *_Ctype_struct_msghdr + Sk uintptr + Msg uintptr } type BindSyscallArgs struct { - Addr *_Ctype_struct_sockaddr - Sk *_Ctype_struct_sock + Addr uintptr + Sk uintptr } type ProtocolStack struct { Api uint8 diff --git a/tasks/system_probe.py b/tasks/system_probe.py index ddb1ee61bb2a9..a0e218e222fbd 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -516,7 +516,7 @@ def ninja_cgo_type_files(nw: NinjaWriter): inputs=[f], outputs=[os.path.join(in_dir, out_file)], rule="godefs", - implicit=headers, + implicit=headers + [script_path], variables={ "in_dir": in_dir, "in_file": in_file, From d03ac92a5aa0b628059c1ee93ec8f21f87b13ee0 Mon Sep 17 00:00:00 2001 From: Adel Haj Hassan <41540817+adel121@users.noreply.github.com> Date: Tue, 10 Sep 2024 22:20:57 +0200 Subject: [PATCH 117/128] use sync.Once to avoid recalculating tagger.tagstore_use_composite_entity_id on each call (#29211) --- pkg/util/tagger/tagger.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/pkg/util/tagger/tagger.go b/pkg/util/tagger/tagger.go index 018c9052b8063..b8ae45a2f97c5 100644 --- a/pkg/util/tagger/tagger.go +++ b/pkg/util/tagger/tagger.go @@ -6,11 +6,21 @@ // Package tagger provides function to check if the tagger should use composite entity id and object store package tagger -import pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +import ( + "sync" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +var useCompositeStore bool +var doOnce sync.Once // ShouldUseCompositeStore indicates whether the tagger should use the default or composite implementation // of entity ID and object store. 
// TODO: remove this when we switch over fully to the composite implementation func ShouldUseCompositeStore() bool { - return pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") + doOnce.Do(func() { + useCompositeStore = pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") + }) + return useCompositeStore } From f2ee8d878d52ed237c00d4be9d4649bbbf50b1e0 Mon Sep 17 00:00:00 2001 From: Keisuke Umegaki <41987730+keisku@users.noreply.github.com> Date: Wed, 11 Sep 2024 06:43:11 +0900 Subject: [PATCH 118/128] APM: Use `compressor.Encoding()` for error log messages (#29207) --- pkg/trace/writer/trace.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/trace/writer/trace.go b/pkg/trace/writer/trace.go index 2a7b5ab6b9314..d2879dc7f0c2b 100644 --- a/pkg/trace/writer/trace.go +++ b/pkg/trace/writer/trace.go @@ -294,14 +294,14 @@ func (w *TraceWriter) serialize(pl *pb.AgentPayload) { if err != nil { // it will never happen, unless an invalid compression is chosen; // we know gzip.BestSpeed is valid. - log.Errorf("Failed to initialize gzip writer. No traces can be sent: %v", err) + log.Errorf("Failed to initialize %s writer. No traces can be sent: %v", w.compressor.Encoding(), err) return } if _, err := writer.Write(b); err != nil { - log.Errorf("Error gzipping trace payload: %v", err) + log.Errorf("Error %s trace payload: %v", w.compressor.Encoding(), err) } if err := writer.Close(); err != nil { - log.Errorf("Error closing gzip stream when writing trace payload: %v", err) + log.Errorf("Error closing %s stream when writing trace payload: %v", w.compressor.Encoding(), err) } sendPayloads(w.senders, p, w.syncMode) From 61bc84ece0899eb7c071000891ee9c71fe7f4503 Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Wed, 11 Sep 2024 09:43:11 +0200 Subject: [PATCH 119/128] fix(fleet): Properly unset log level after a flare (#29152) Co-authored-by: Dario Meloni --- .../rcclient/rcclientimpl/rcclient.go | 2 +- pkg/config/model/viper.go | 19 +++++++++++++++++-- pkg/config/model/viper_test.go | 17 +++++++++++++++++ 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go index 693c7d42e1971..bbe779df219cd 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go @@ -269,8 +269,8 @@ func (rc rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig, // - we want to change (once again) the log level through RC // - we want to fall back to the log level we had saved as fallback (in that case mergedConfig.LogLevel == "") if len(mergedConfig.LogLevel) == 0 { - pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level")) config.Datadog().UnsetForSource("log_level", model.SourceRC) + pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level")) } else { newLevel := mergedConfig.LogLevel pkglog.Infof("Changing log level to '%s' through remote config", newLevel) diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go index f4266c10de8f2..8b4477cdfe55f 100644 --- a/pkg/config/model/viper.go +++ b/pkg/config/model/viper.go @@ -161,12 +161,25 @@ func (c *safeConfig) SetDefault(key string, value interface{}) { c.Viper.SetDefault(key, value) } -// UnsetForSource wraps Viper for concurrent access +// UnsetForSource unsets a config entry for a given 
source func (c *safeConfig) UnsetForSource(key string, source Source) { + // modify the config then release the lock to avoid deadlocks while notifying + var receivers []NotificationReceiver c.Lock() - defer c.Unlock() + previousValue := c.Viper.Get(key) c.configSources[source].Set(key, nil) c.mergeViperInstances(key) + newValue := c.Viper.Get(key) // Can't use nil, so we get the newly computed value + if previousValue != nil { + // if the value has not changed, do not duplicate the slice so that no callback is called + receivers = slices.Clone(c.notificationReceivers) + } + c.Unlock() + + // notifying all receiver about the updated setting + for _, receiver := range receivers { + receiver(key, previousValue, newValue) + } } // mergeViperInstances is called after a change in an instance of Viper @@ -640,6 +653,8 @@ func (c *safeConfig) MergeConfig(in io.Reader) error { // MergeFleetPolicy merges the configuration from the reader given with an existing config // it overrides the existing values with the new ones in the FleetPolicies source, and updates the main config // according to sources priority order. +// +// Note: this should only be called at startup, as notifiers won't receive a notification when this loads func (c *safeConfig) MergeFleetPolicy(configPath string) error { c.Lock() defer c.Unlock() diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go index 83bec5ee7a66b..22209d122c8fd 100644 --- a/pkg/config/model/viper_test.go +++ b/pkg/config/model/viper_test.go @@ -449,3 +449,20 @@ func TestParseEnvAsSliceMapString(t *testing.T) { t.Setenv("DD_MAP", "__some_data__") assert.Equal(t, []map[string]string{{"a": "a", "b": "b", "c": "c"}}, config.Get("map")) } + +func TestListenersUnsetForSource(t *testing.T) { + config := NewConfig("test", "DD", strings.NewReplacer(".", "_")) + + // Create a listener that will keep track of the changes + logLevels := []string{} + config.OnUpdate(func(_ string, _, next any) { + nextString := next.(string) + logLevels = append(logLevels, nextString) + }) + + config.Set("log_level", "info", SourceFile) + config.Set("log_level", "debug", SourceRC) + config.UnsetForSource("log_level", SourceRC) + + assert.Equal(t, []string{"info", "debug", "info"}, logLevels) +} From f3313b8f4c1ffb22f4f12fcb78a047eff7eff23a Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Wed, 11 Sep 2024 08:05:37 +0000 Subject: [PATCH 120/128] Add a VM provisionner for GCP (#29214) --- test/new-e2e/examples/gcp_vm_test.go | 32 ++++ test/new-e2e/go.mod | 1 + test/new-e2e/go.sum | 2 + .../pkg/environments/gcp/host/linux/host.go | 124 ++++++++++++++ .../pkg/environments/gcp/host/linux/params.go | 152 ++++++++++++++++++ test/new-e2e/pkg/runner/local_profile.go | 1 + 6 files changed, 312 insertions(+) create mode 100644 test/new-e2e/examples/gcp_vm_test.go create mode 100644 test/new-e2e/pkg/environments/gcp/host/linux/host.go create mode 100644 test/new-e2e/pkg/environments/gcp/host/linux/params.go diff --git a/test/new-e2e/examples/gcp_vm_test.go b/test/new-e2e/examples/gcp_vm_test.go new file mode 100644 index 0000000000000..1f897a87bbfe3 --- /dev/null +++ b/test/new-e2e/examples/gcp_vm_test.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package examples + +import ( + gcphost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/host/linux" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" +) + +type gcpVMSuite struct { + e2e.BaseSuite[environments.Host] +} + +// TestGCPVMSuite runs tests for the VM interface to ensure its implementation is correct. +func TestGCPVMSuite(t *testing.T) { + suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(gcphost.ProvisionerNoAgentNoFakeIntake())} + e2e.Run(t, &gcpVMSuite{}, suiteParams...) +} + +func (v *gcpVMSuite) TestExecute() { + vm := v.Env().RemoteHost + + out, err := vm.Execute("whoami") + v.Require().NoError(err) + v.Require().NotEmpty(out) +} diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index b237c08f61928..529c6d10ec489 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -270,4 +270,5 @@ require ( github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 // indirect github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect + github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect ) diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 4103fed9d0cb4..10fe8e4f663e7 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -419,6 +419,8 @@ github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 h1:NeCKFxyOLpAaG4pJDk7+ewnCuV2IbXR7Pg github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8/go.mod h1:ARGNnIZENIpDUVSX21JEQJKrESj/0u0r0iT61rpb86I= github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 h1:PUH/sUbJmBmHjNFNthJ/dW2+riFuJV0FhrGAwuUuRIg= github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1/go.mod h1:OmZeji3dNMwB1qldAlaQfcfJPc2BaZyweVGH7Ej4SJg= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 h1:21oSj+TKlKTzQcxN9Hik7iSNNHPUQXN4s3itOnahy/w= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0/go.mod h1:YaEZms1NgXFqGhObKVofcAeWXu2V+3t/BAXdHQZq7fU= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 h1:VDX+hu+qK3fbf2FodgG5kfh2h1bHK0FKirW1YqKWkRc= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1/go.mod h1:e69ohZtUePLLYNLXYgiOWp0FvRGg6ya/3fsq3o00nN0= github.com/pulumi/pulumi-libvirt/sdk v0.4.7 h1:/BBnqqx/Gbg2vINvJxXIVb58THXzw2lSqFqxlRSXH9M= diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/host.go b/test/new-e2e/pkg/environments/gcp/host/linux/host.go new file mode 100644 index 0000000000000..0e479d8a51bdf --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/host/linux/host.go @@ -0,0 +1,124 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package gcphost contains the definition of the GCP Host environment. 
+package gcphost + +import ( + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/test-infra-definitions/resources/gcp" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + + "github.com/DataDog/test-infra-definitions/components/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/components/datadog/updater" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +const ( + provisionerBaseID = "gcp-vm-" + defaultVMName = "vm" +) + +// Provisioner creates a VM environment with an VM, a FakeIntake and a Host Agent configured to talk to each other. +// FakeIntake and Agent creation can be deactivated by using [WithoutFakeIntake] and [WithoutAgent] options. +func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + // We need to build params here to be able to use params.name in the provisioner name + params := GetProvisionerParams(opts...) + + provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Host) error { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard-to-debug issues. + params := GetProvisionerParams(opts...) + return Run(ctx, env, RunParams{ProvisionerParams: params}) + }, params.extraConfigParams) + + return provisioner +} + +// Run deploys an environment given a pulumi.Context +func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error { + var gcpEnv gcp.Environment + if runParams.Environment == nil { + var err error + gcpEnv, err = gcp.NewEnvironment(ctx) + if err != nil { + return err + } + } else { + gcpEnv = *runParams.Environment + } + params := runParams.ProvisionerParams + + host, err := compute.NewVM(gcpEnv, params.name, params.instanceOptions...) + if err != nil { + return err + } + err = host.Export(ctx, &env.RemoteHost.HostOutput) + if err != nil { + return err + } + + // Create FakeIntake if required + if params.fakeintakeOptions != nil { + fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...) + if err != nil { + return err + } + err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput) + if err != nil { + return err + } + + // Normally if FakeIntake is enabled, Agent is enabled, but just in case + if params.agentOptions != nil { + // Prepend in case it's overridden by the user + newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)} + params.agentOptions = append(newOpts, params.agentOptions...) + } + } else { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.FakeIntake = nil + } + if !params.installUpdater { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.Updater = nil + } + + // Create Agent if required + if params.installUpdater && params.agentOptions != nil { + updater, err := updater.NewHostUpdater(&gcpEnv, host, params.agentOptions...) + if err != nil { + return err + } + + err = updater.Export(ctx, &env.Updater.HostUpdaterOutput) + if err != nil { + return err + } + // todo: add agent once updater installs agent on bootstrap + env.Agent = nil + } else if params.agentOptions != nil { + agent, err := agent.NewHostAgent(&gcpEnv, host, params.agentOptions...) 
+ if err != nil { + return err + } + + err = agent.Export(ctx, &env.Agent.HostAgentOutput) + if err != nil { + return err + } + + env.Agent.ClientOptions = params.agentClientOptions + } else { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.Agent = nil + } + + return nil +} diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/params.go b/test/new-e2e/pkg/environments/gcp/host/linux/params.go new file mode 100644 index 0000000000000..442fd28b889b0 --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/host/linux/params.go @@ -0,0 +1,152 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package gcphost + +import ( + "fmt" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/resources/gcp" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" +) + +// ProvisionerParams is a set of parameters for the Provisioner. +type ProvisionerParams struct { + name string + + instanceOptions []compute.VMOption + agentOptions []agentparams.Option + agentClientOptions []agentclientparams.Option + fakeintakeOptions []fakeintake.Option + extraConfigParams runner.ConfigMap + installUpdater bool +} + +func newProvisionerParams() *ProvisionerParams { + // We use nil arrays to decide if we should create or not + return &ProvisionerParams{ + name: defaultVMName, + instanceOptions: []compute.VMOption{}, + agentOptions: []agentparams.Option{}, + agentClientOptions: []agentclientparams.Option{}, + fakeintakeOptions: []fakeintake.Option{}, + extraConfigParams: runner.ConfigMap{}, + } +} + +// GetProvisionerParams return ProvisionerParams from options opts setup +func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams { + params := newProvisionerParams() + err := optional.ApplyOptions(params, opts) + if err != nil { + panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err)) + } + return params +} + +// ProvisionerOption is a provisioner option. +type ProvisionerOption func(*ProvisionerParams) error + +// WithName sets the name of the provisioner. +func WithName(name string) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.name = name + return nil + } +} + +// WithInstanceOptions adds options to the EC2 VM. +func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.instanceOptions = append(params.instanceOptions, opts...) + return nil + } +} + +// WithAgentOptions adds options to the Agent. +func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentOptions = append(params.agentOptions, opts...) + return nil + } +} + +// WithAgentClientOptions adds options to the Agent client. 
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentClientOptions = append(params.agentClientOptions, opts...) + return nil + } +} + +// WithFakeIntakeOptions adds options to the FakeIntake. +func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.fakeintakeOptions = append(params.fakeintakeOptions, opts...) + return nil + } +} + +// WithExtraConfigParams adds extra config parameters to the ConfigMap. +func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.extraConfigParams = configMap + return nil + } +} + +// WithoutFakeIntake disables the creation of the FakeIntake. +func WithoutFakeIntake() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.fakeintakeOptions = nil + return nil + } +} + +// WithoutAgent disables the creation of the Agent. +func WithoutAgent() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentOptions = nil + return nil + } +} + +// WithUpdater installs the agent through the updater. +func WithUpdater() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.installUpdater = true + return nil + } +} + +// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options. +func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + mergedOpts := make([]ProvisionerOption, 0, len(opts)+2) + mergedOpts = append(mergedOpts, opts...) + mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake()) + + return Provisioner(mergedOpts...) +} + +// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option. +func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + mergedOpts := make([]ProvisionerOption, 0, len(opts)+1) + mergedOpts = append(mergedOpts, opts...) + mergedOpts = append(mergedOpts, WithoutFakeIntake()) + + return Provisioner(mergedOpts...) +} + +// RunParams is a set of parameters for the Run function. 
+type RunParams struct { + Environment *gcp.Environment + ProvisionerParams *ProvisionerParams +} diff --git a/test/new-e2e/pkg/runner/local_profile.go b/test/new-e2e/pkg/runner/local_profile.go index 633e0ccf0972c..2cba95a568cd2 100644 --- a/test/new-e2e/pkg/runner/local_profile.go +++ b/test/new-e2e/pkg/runner/local_profile.go @@ -19,6 +19,7 @@ import ( var defaultLocalEnvironments = map[string]string{ "aws": "agent-sandbox", "az": "agent-sandbox", + "gcp": "agent-sandbox", } // NewLocalProfile creates a new local profile From e7ddf3c7e5a679a6f6d744d23438316942108286 Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 08:21:23 +0000 Subject: [PATCH 121/128] [test-infra-definitions][automated] Bump test-infra-definitions to ce6a4aad9299f833164ee6acad8dbd9168d18705 (#29216) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index b956a16ab7c80..d095cd25f8513 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 7be84fb14a74 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: ce6a4aad9299 diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 529c6d10ec489..bbe8063d8489a 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -32,7 +32,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74 + github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299 github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 10fe8e4f663e7..e31485a46f4bb 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -14,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmi github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74 h1:ebZDQrJIzcrVy/XpEzXSgw5ScJCtnRETbzo+3g3YsTc= -github.com/DataDog/test-infra-definitions v0.0.0-20240910071149-7be84fb14a74/go.mod h1:orHExiPWWT9f68UJZ92oIVX1OcTNlKvtbX7b6HM9e0Q= +github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299 h1:lMzRshj0zEnNId74hiUsXSClnB0qKmQlC3VQ9kC6p+0= +github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299/go.mod h1:orHExiPWWT9f68UJZ92oIVX1OcTNlKvtbX7b6HM9e0Q= github.com/DataDog/zstd v1.5.5 
h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From 431d20e9deedf005c008a93bf6753cedd0286204 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Wed, 11 Sep 2024 12:15:46 +0200 Subject: [PATCH 122/128] feat(github): Force default permissions of GITHUB_TOKEN (#28832) --- .github/workflows/add_milestone.yml | 4 ++++ .github/workflows/backport-pr.yml | 5 +++++ .github/workflows/buildimages-update.yml | 2 ++ .github/workflows/chase_release_managers.yml | 1 + .github/workflows/codeql-analysis.yml | 2 ++ .github/workflows/create_rc_pr.yml | 5 +++++ .github/workflows/create_release_schedule.yml | 2 ++ .github/workflows/cws-btfhub-sync.yml | 5 +++++ .github/workflows/datadog-static-analysis.yml | 2 ++ .github/workflows/do-not-merge.yml | 2 ++ .github/workflows/docs-dev.yml | 4 ++++ .github/workflows/external-contributor.yml | 4 ++++ .github/workflows/go-update-commenter.yml | 2 ++ .github/workflows/gohai.yml | 2 ++ .github/workflows/label-analysis.yml | 4 ++++ .github/workflows/labeler.yml | 2 ++ .github/workflows/markdown-lint-check.yml | 2 ++ .github/workflows/serverless-benchmarks.yml | 2 ++ .github/workflows/serverless-binary-size.yml | 2 ++ .github/workflows/serverless-integration.yml | 2 ++ .github/workflows/slapr.yml | 2 ++ 21 files changed, 58 insertions(+) diff --git a/.github/workflows/add_milestone.yml b/.github/workflows/add_milestone.yml index ef43c0869e896..cc647378a5460 100644 --- a/.github/workflows/add_milestone.yml +++ b/.github/workflows/add_milestone.yml @@ -8,11 +8,15 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: add-milestone-pr: name: Add Milestone on PR if: github.event.pull_request.merged == true runs-on: ubuntu-latest + permissions: + pull-requests: write env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} diff --git a/.github/workflows/backport-pr.yml b/.github/workflows/backport-pr.yml index 188cd54aadb8d..3ee7eeeb276b4 100644 --- a/.github/workflows/backport-pr.yml +++ b/.github/workflows/backport-pr.yml @@ -5,6 +5,8 @@ on: - closed - labeled +permissions: {} + jobs: backport: name: Backport PR @@ -18,6 +20,9 @@ jobs: && contains(github.event.label.name, 'backport') ) ) + permissions: + contents: write + pull-requests: write steps: - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 id: app-token diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml index 9a04aceed38e4..523018890c0f6 100644 --- a/.github/workflows/buildimages-update.yml +++ b/.github/workflows/buildimages-update.yml @@ -24,6 +24,8 @@ on: required: true type: boolean +permissions: {} + jobs: open-go-update-pr: runs-on: ubuntu-latest diff --git a/.github/workflows/chase_release_managers.yml b/.github/workflows/chase_release_managers.yml index 652746f1e93a9..bcf922f93d575 100644 --- a/.github/workflows/chase_release_managers.yml +++ b/.github/workflows/chase_release_managers.yml @@ -8,6 +8,7 @@ on: required: true type: string +permissions: {} jobs: create_release_schedule: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4613f73359f16..f828f7b66779d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -10,6 +10,8 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: CodeQL-Build: runs-on: 
ubuntu-20.04 diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index 0d190cb7bb606..bfed4df0f66b4 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -9,6 +9,8 @@ on: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +permissions: {} + jobs: find_release_branches: runs-on: ubuntu-latest @@ -48,6 +50,9 @@ jobs: create_rc_pr: runs-on: ubuntu-latest needs: find_release_branches + permissions: + contents: write + pull-requests: write strategy: matrix: value: ${{fromJSON(needs.find_release_branches.outputs.branches)}} diff --git a/.github/workflows/create_release_schedule.yml b/.github/workflows/create_release_schedule.yml index e19372fa8f1d7..4fc749d9b280c 100644 --- a/.github/workflows/create_release_schedule.yml +++ b/.github/workflows/create_release_schedule.yml @@ -12,6 +12,8 @@ on: required: true type: string +permissions: {} + jobs: create_release_schedule: diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 970a0fef308f8..2e3152fb10763 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -16,6 +16,8 @@ on: schedule: - cron: '30 4 * * 5' # at 4:30 UTC on Friday +permissions: {} + jobs: generate: runs-on: ubuntu-latest @@ -91,6 +93,9 @@ jobs: combine: needs: generate runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write steps: - name: Checkout datadog-agent repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.github/workflows/datadog-static-analysis.yml b/.github/workflows/datadog-static-analysis.yml index c738e875c2991..e7030c87f71a9 100644 --- a/.github/workflows/datadog-static-analysis.yml +++ b/.github/workflows/datadog-static-analysis.yml @@ -2,6 +2,8 @@ on: [push] name: Datadog Static Analysis +permissions: {} + jobs: static-analysis: if: github.triggering_actor != 'dependabot[bot]' diff --git a/.github/workflows/do-not-merge.yml b/.github/workflows/do-not-merge.yml index 13886c696f679..a21f9e03d1fb2 100644 --- a/.github/workflows/do-not-merge.yml +++ b/.github/workflows/do-not-merge.yml @@ -10,6 +10,8 @@ on: branches: - mq-working-branch-* +permissions: {} + jobs: do-not-merge: if: ${{ contains(github.event.*.labels.*.name, 'do-not-merge/hold') || contains(github.event.*.labels.*.name, 'do-not-merge/WIP') }} diff --git a/.github/workflows/docs-dev.yml b/.github/workflows/docs-dev.yml index 7dba335ed58b7..4ce377865f81b 100644 --- a/.github/workflows/docs-dev.yml +++ b/.github/workflows/docs-dev.yml @@ -14,6 +14,8 @@ on: - docs/** - .github/workflows/docs-dev.yml +permissions: {} + concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' && true || false }} @@ -54,6 +56,8 @@ jobs: publish: runs-on: ubuntu-latest + permissions: + contents: write if: github.event_name == 'push' && github.ref == 'refs/heads/main' needs: diff --git a/.github/workflows/external-contributor.yml b/.github/workflows/external-contributor.yml index 3cc35d4cc12e2..d4092f41492ae 100644 --- a/.github/workflows/external-contributor.yml +++ b/.github/workflows/external-contributor.yml @@ -6,10 +6,14 @@ on: pull_request_target: types: [opened, reopened] +permissions: {} + jobs: external-contributor-prs: name: Handle Fork PRs runs-on: ubuntu-latest + permissions: + pull-requests: write if: github.event.pull_request.head.repo.full_name != github.repository steps: - name: Checkout repository 
diff --git a/.github/workflows/go-update-commenter.yml b/.github/workflows/go-update-commenter.yml index 9925fba1614ca..ff1d104c691ec 100644 --- a/.github/workflows/go-update-commenter.yml +++ b/.github/workflows/go-update-commenter.yml @@ -5,6 +5,8 @@ on: # Only run on PR label events (in particular not on every commit) types: [ labeled ] +permissions: {} + jobs: old-versions-match: # Only run if the PR is labeled with 'go-update' diff --git a/.github/workflows/gohai.yml b/.github/workflows/gohai.yml index bb20f0e0104df..a328f67c5b853 100644 --- a/.github/workflows/gohai.yml +++ b/.github/workflows/gohai.yml @@ -12,6 +12,8 @@ on: paths: - "pkg/gohai/**" +permissions: {} + jobs: gohai_test: strategy: diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index 08980653d1d83..1f0601757941f 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -13,10 +13,14 @@ env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} +permissions: {} + jobs: assign-team-label: if: github.triggering_actor != 'dd-devflow[bot]' runs-on: ubuntu-latest + permissions: + pull-requests: write steps: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 5cade58e6495c..b92075b895975 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -6,6 +6,8 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: label: permissions: diff --git a/.github/workflows/markdown-lint-check.yml b/.github/workflows/markdown-lint-check.yml index 94386e05e6621..6ade9a5ec8560 100644 --- a/.github/workflows/markdown-lint-check.yml +++ b/.github/workflows/markdown-lint-check.yml @@ -3,6 +3,8 @@ name: Check Markdown links on: pull_request: +permissions: {} + jobs: markdown-link-check: runs-on: ubuntu-latest diff --git a/.github/workflows/serverless-benchmarks.yml b/.github/workflows/serverless-benchmarks.yml index 37742948b0d36..40ac0953e42a4 100644 --- a/.github/workflows/serverless-benchmarks.yml +++ b/.github/workflows/serverless-benchmarks.yml @@ -14,6 +14,8 @@ concurrency: group: ${{ github.workflow }}/PR#${{ github.event.pull_request.number }} cancel-in-progress: true +permissions: {} + jobs: baseline: name: Baseline diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml index e396e8a78751d..c8880d3306d01 100644 --- a/.github/workflows/serverless-binary-size.yml +++ b/.github/workflows/serverless-binary-size.yml @@ -6,6 +6,8 @@ on: env: SIZE_ALLOWANCE: fromJSON(1000000) # 1 MB +permissions: {} + jobs: comment: runs-on: ubuntu-latest diff --git a/.github/workflows/serverless-integration.yml b/.github/workflows/serverless-integration.yml index 8bd8459b6c52f..c2866e77f69b4 100644 --- a/.github/workflows/serverless-integration.yml +++ b/.github/workflows/serverless-integration.yml @@ -12,6 +12,8 @@ on: schedule: - cron: '0 14 * * *' # cron schedule uses UTC timezone. 
Run tests at the beginning of the day in US-East +permissions: {} + jobs: test: runs-on: ubuntu-latest diff --git a/.github/workflows/slapr.yml b/.github/workflows/slapr.yml index 48be5e393fd38..e88d67945b5a2 100644 --- a/.github/workflows/slapr.yml +++ b/.github/workflows/slapr.yml @@ -7,6 +7,8 @@ # pull_request: # types: [closed] # +# permissions: {} +# # jobs: # run_slapr_datadog_agent: # runs-on: ubuntu-latest From 93ffba075522f0a0f597e29d7865c2f75e7d0ccd Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Wed, 11 Sep 2024 10:15:59 +0000 Subject: [PATCH 123/128] Temporarily mark the new-e2e-installer tests as allowed to fail (#29232) --- .gitlab/e2e/e2e.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index cdb56ea050658..8cb38a1e6f869 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -337,6 +337,7 @@ new-e2e-installer: TARGETS: ./tests/installer TEAM: fleet FLEET_INSTALL_METHOD: "install_script" + allow_failure: true # incident-30484 new-e2e-installer-ansible: extends: .new_e2e_template @@ -359,6 +360,7 @@ new-e2e-installer-ansible: TARGETS: ./tests/installer TEAM: fleet FLEET_INSTALL_METHOD: "ansible" + allow_failure: true # incident-30484 new-e2e-ndm-netflow: extends: .new_e2e_template From 4a122d243ee9d038a67555c67c9565a555f4e26d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?I=C3=B1igo=20L=C3=B3pez=20de=20Heredia?= Date: Wed, 11 Sep 2024 12:20:37 +0200 Subject: [PATCH 124/128] [APM] Do not crash on UDS listener failure (#29218) --- pkg/trace/api/api.go | 19 ++++++++++--------- pkg/trace/api/api_nix_test.go | 16 ++++++++++++++++ 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index 816a14eef938d..7062efc03e307 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -279,17 +279,18 @@ func (r *HTTPReceiver) Start() { if _, err := os.Stat(filepath.Dir(path)); !os.IsNotExist(err) { ln, err := r.listenUnix(path) if err != nil { + log.Errorf("Error creating UDS listener: %v", err) r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) - killProcess("Error creating UDS listener: %v", err) + } else { + go func() { + defer watchdog.LogOnPanic(r.statsd) + if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed { + log.Errorf("Could not start UDS server: %v. UDS receiver disabled.", err) + r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) + } + }() + log.Infof("Listening for traces at unix://%s", path) } - go func() { - defer watchdog.LogOnPanic(r.statsd) - if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed { - log.Errorf("Could not start UDS server: %v. 
UDS receiver disabled.", err) - r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) - } - }() - log.Infof("Listening for traces at unix://%s", path) } else { log.Errorf("Could not start UDS listener: socket directory does not exist: %s", path) } diff --git a/pkg/trace/api/api_nix_test.go b/pkg/trace/api/api_nix_test.go index 8c871ad875f9d..fbefae776493e 100644 --- a/pkg/trace/api/api_nix_test.go +++ b/pkg/trace/api/api_nix_test.go @@ -13,6 +13,7 @@ import ( "fmt" "net" "net/http" + "os" "path/filepath" "testing" "time" @@ -83,6 +84,21 @@ func TestUDS(t *testing.T) { t.Fatalf("expected http.StatusOK, got response: %#v", resp) } }) + + t.Run("uds_permission_err", func(t *testing.T) { + dir := t.TempDir() + err := os.Chmod(dir, 0444) // read-only + assert.NoError(t, err) + + conf := config.New() + conf.Endpoints[0].APIKey = "apikey_2" + conf.ReceiverSocket = filepath.Join(dir, "apm.socket") + + r := newTestReceiverFromConfig(conf) + // should not crash + r.Start() + r.Stop() + }) } func TestHTTPReceiverStart(t *testing.T) { From 3fa4f01592c5444c303cd2a11bc4106d6f5a995e Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Wed, 11 Sep 2024 15:16:58 +0200 Subject: [PATCH 125/128] [CWS] do not route and parse DNS/IMDS packets if the event is not enabled (#29240) --- .../ebpf/c/include/hooks/network/router.h | 12 ++++++++---- pkg/security/probe/probe_ebpf.go | 17 +++++++++++------ 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/pkg/security/ebpf/c/include/hooks/network/router.h b/pkg/security/ebpf/c/include/hooks/network/router.h index 11f42d748c195..81978692e5113 100644 --- a/pkg/security/ebpf/c/include/hooks/network/router.h +++ b/pkg/security/ebpf/c/include/hooks/network/router.h @@ -44,13 +44,17 @@ __attribute__((always_inline)) int route_pkt(struct __sk_buff *skb, struct packe // TODO: l3 / l4 firewall // route DNS requests - if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { - tail_call_to_classifier(skb, DNS_REQUEST); + if (is_event_enabled(EVENT_DNS)) { + if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { + tail_call_to_classifier(skb, DNS_REQUEST); + } } // route IMDS requests - if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { - tail_call_to_classifier(skb, IMDS_REQUEST); + if (is_event_enabled(EVENT_IMDS)) { + if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { + tail_call_to_classifier(skb, IMDS_REQUEST); + } } return ACT_OK; diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index ae9a8c70350cc..ca88215596b59 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1208,17 +1208,17 @@ func (p *EBPFProbe) validEventTypeForConfig(eventType string) bool { // of the applied approvers for it. func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscalls bool) error { // event types enabled either by event handlers or by rules - eventTypes := append([]eval.EventType{}, defaultEventTypes...) - eventTypes = append(eventTypes, ruleEventTypes...) + requestedEventTypes := append([]eval.EventType{}, defaultEventTypes...) + requestedEventTypes = append(requestedEventTypes, ruleEventTypes...) 
for eventType, handlers := range p.probe.eventHandlers { if len(handlers) == 0 { continue } - if slices.Contains(eventTypes, model.EventType(eventType).String()) { + if slices.Contains(requestedEventTypes, model.EventType(eventType).String()) { continue } if eventType != int(model.UnknownEventType) && eventType != int(model.MaxAllEventType) { - eventTypes = append(eventTypes, model.EventType(eventType).String()) + requestedEventTypes = append(requestedEventTypes, model.EventType(eventType).String()) } } @@ -1226,8 +1226,13 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall // extract probe to activate per the event types for eventType, selectors := range probes.GetSelectorsPerEventType(p.useFentry) { - if (eventType == "*" || slices.Contains(eventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType) || p.config.Probe.EnableAllProbes) && p.validEventTypeForConfig(eventType) { + if (eventType == "*" || slices.Contains(requestedEventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType) || p.config.Probe.EnableAllProbes) && p.validEventTypeForConfig(eventType) { activatedProbes = append(activatedProbes, selectors...) + + // to ensure the `enabled_events` map is correctly set with events that are enabled because of ADs + if !slices.Contains(requestedEventTypes, eventType) { + requestedEventTypes = append(requestedEventTypes, eventType) + } } } @@ -1292,7 +1297,7 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall } enabledEvents := uint64(0) - for _, eventName := range eventTypes { + for _, eventName := range requestedEventTypes { if eventName != "*" { eventType := config.ParseEvalEventType(eventName) if eventType == model.UnknownEventType { From 27a68e2240b6d75977d7728075899b80470fcf6f Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 11 Sep 2024 14:20:32 +0100 Subject: [PATCH 126/128] Update the `zstd` for metrics default compression level to 1 (#29242) Signed-off-by: Stephen Wakely --- pkg/config/setup/config.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 4cf537291aae8..9ed374b26e64b 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -78,8 +78,10 @@ const ( // DefaultCompressorKind is the default compressor. Options available are 'zlib' and 'zstd' DefaultCompressorKind = "zlib" - // DefaultZstdCompressionLevel should mirror the default compression level defined in https://github.com/DataDog/zstd/blob/1.x/zstd.go#L23 - DefaultZstdCompressionLevel = 5 + // DefaultZstdCompressionLevel is the default compression level for `zstd`. + // Compression level 1 provides the lowest compression ratio, but uses much less RSS especially + // in situations where we have a high value for `GOMAXPROCS`. 
+ DefaultZstdCompressionLevel = 1 // DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor DefaultLogsSenderBackoffFactor = 2.0 From 7fedd32b5a302b85e91a1d135cf8a7cbc46affd8 Mon Sep 17 00:00:00 2001 From: Adel Haj Hassan <41540817+adel121@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:21:09 +0200 Subject: [PATCH 127/128] update codeowners file (#29244) --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index c05ff3a97e27b..fbdd00e85974f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -426,6 +426,7 @@ /pkg/util/orchestrator/ @DataDog/container-app /pkg/util/podman/ @DataDog/container-integrations /pkg/util/prometheus @DataDog/container-integrations +/pkg/util/tagger @DataDog/container-platform /pkg/util/trivy/ @DataDog/container-integrations @DataDog/agent-security /pkg/util/uuid/ @DataDog/agent-shared-components /pkg/util/cgroups/ @DataDog/container-integrations From f704138ac6e9a10c59168f0f0a21b482c838aa83 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Wed, 11 Sep 2024 15:41:41 +0200 Subject: [PATCH 128/128] fix(actions): Set correct permissions for codeql-action workflow (#29250) --- .github/workflows/codeql-analysis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f828f7b66779d..748cd3e5aaeba 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -15,6 +15,8 @@ permissions: {} jobs: CodeQL-Build: runs-on: ubuntu-20.04 + permissions: + security-events: write strategy: matrix: language: ["go", "javascript", "python", "cpp"]