diff --git a/README.md b/README.md index 1573ce4..34a8643 100644 --- a/README.md +++ b/README.md @@ -223,4 +223,13 @@ standard output this command comes into help. ### `clean`: will remove all the krkn containers from the container runtime, will delete all the kubeconfig files -and logfiles created by the tool in the current folder. \ No newline at end of file +and logfiles created by the tool in the current folder. + +### `query-status [--graph ]`: + +Will query the container platform to return container informations of a container name or Id if the `--graph` flag is not +set, else will query the status of all the container names contained in the graph file. +If a single container Id or name is queried the tool will exit with the same exit status of the container. + +>[!TIP] +> This function can be integrated into CI/CD pipelines to halt execution if the chaos run encounters any failure. diff --git a/cmd/list.go b/cmd/list.go index da20f7a..b6c5c9c 100644 --- a/cmd/list.go +++ b/cmd/list.go @@ -36,7 +36,7 @@ func NewListScenariosCommand(factory *providerfactory.ProviderFactory, config co provider := GetProvider(false, factory) s := NewSpinnerWithSuffix("fetching scenarios...") s.Start() - scenarios, err := provider.GetScenarios(dataSource) + scenarios, err := provider.GetRegistryImages(dataSource) if err != nil { s.Stop() log.Fatalf("failed to fetch scenarios: %v", err) diff --git a/cmd/query_status.go b/cmd/query_status.go new file mode 100644 index 0000000..83b7d6a --- /dev/null +++ b/cmd/query_status.go @@ -0,0 +1,128 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "github.com/krkn-chaos/krknctl/internal/config" + provider_models "github.com/krkn-chaos/krknctl/pkg/provider/models" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/models" + "github.com/spf13/cobra" + "os" +) + +func resolveContainerIdOrName(orchestrator scenario_orchestrator.ScenarioOrchestrator, arg string, 
conn context.Context, conf config.Config) error { + var scenarioContainer *models.ScenarioContainer + var containerId *string + + containerId, err := orchestrator.ResolveContainerName(arg, conn) + if err != nil { + return err + } + if containerId == nil { + containerId = &arg + } + + scenarioContainer, err = orchestrator.InspectScenario(models.Container{Id: *containerId}, conn) + + if err != nil { + return err + } + + if scenarioContainer == nil { + return fmt.Errorf("scenarioContainer with id or name %s not found", arg) + } + + containerJson, err := json.Marshal(scenarioContainer.Container) + if err != nil { + return err + } + fmt.Println(string(containerJson)) + if scenarioContainer.Container.ExitStatus != 0 { + return fmt.Errorf("%s %d", conf.ContainerExitStatusPrefix, scenarioContainer.Container.ExitStatus) + } + return nil +} + +func resolveGraphFile(orchestrator scenario_orchestrator.ScenarioOrchestrator, filename string, conn context.Context, conf config.Config) error { + var scenarioFile = make(map[string]provider_models.ScenarioDetail) + var containers = make([]models.Container, 0) + + fileData, err := os.ReadFile(filename) + if err != nil { + return err + } + err = json.Unmarshal(fileData, &scenarioFile) + if err != nil { + return err + } + for key, _ := range scenarioFile { + scenario, err := orchestrator.ResolveContainerName(key, conn) + if err != nil { + return err + } + if scenario != nil { + containerScenario, err := orchestrator.InspectScenario(models.Container{Id: *scenario}, conn) + if err != nil { + return err + } + if containerScenario != nil { + if (*containerScenario).Container != nil { + containers = append(containers, *(*containerScenario).Container) + } + } + } + } + containersJson, err := json.Marshal(containers) + if err != nil { + return err + } + fmt.Println(string(containersJson)) + return nil +} + +func NewQueryStatusCommand(scenarioOrchestrator *scenario_orchestrator.ScenarioOrchestrator, config config.Config) *cobra.Command { + var 
command = &cobra.Command{ + Use: "query-status", + Short: "checks the status of a container or a list of containers", + Long: `checks the status of a container or a list of containers by container name or container Id`, + Args: cobra.MaximumNArgs(1), + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + socket, err := (*scenarioOrchestrator).GetContainerRuntimeSocket(nil) + if err != nil { + return err + } + conn, err := (*scenarioOrchestrator).Connect(*socket) + if err != nil { + return err + } + if len(args) > 0 { + err = resolveContainerIdOrName(*scenarioOrchestrator, args[0], conn, config) + return err + } + + graphPath, err := cmd.Flags().GetString("graph") + if err != nil { + return err + } + + if graphPath == "" { + return fmt.Errorf("neither container Id or name nor graph plan file specified") + } + + if CheckFileExists(graphPath) == false { + return fmt.Errorf("graph file %s not found", graphPath) + } + + err = resolveGraphFile(*scenarioOrchestrator, graphPath, conn, config) + + if err != nil { + return err + } + return nil + }, + } + return command +} diff --git a/cmd/root.go b/cmd/root.go index 60885fb..84350b0 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -7,6 +7,8 @@ import ( "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator" "github.com/spf13/cobra" "os" + "strconv" + "strings" ) func Execute(providerFactory *factory.ProviderFactory, scenarioOrchestrator *scenario_orchestrator.ScenarioOrchestrator, config config.Config) { @@ -73,9 +75,23 @@ func Execute(providerFactory *factory.ProviderFactory, scenarioOrchestrator *sce attachCmd := NewAttachCmd(scenarioOrchestrator) rootCmd.AddCommand(attachCmd) + queryCmd := NewQueryStatusCommand(scenarioOrchestrator, config) + queryCmd.Flags().String("graph", "", "to query the exit status of a previously run graph file") + rootCmd.AddCommand(queryCmd) if err := rootCmd.Execute(); err != nil { - fmt.Println(err) + // intercept the propagated exit status from the container and exits 
with the same code + if strings.Contains(err.Error(), config.ContainerExitStatusPrefix) { + exitCodeStr := strings.Split(err.Error(), " ") + if len(exitCodeStr) == 2 { + exitStatus, err := strconv.ParseInt(exitCodeStr[1], 10, 32) + if err != nil { + fmt.Println(fmt.Sprintf("Error converting exit code to int: %s", err)) + os.Exit(1) + } + os.Exit(int(exitStatus)) + } + } os.Exit(1) } } diff --git a/cmd/tables.go b/cmd/tables.go index 3a2f633..f8c2d7e 100644 --- a/cmd/tables.go +++ b/cmd/tables.go @@ -55,7 +55,7 @@ func NewGraphTable(graph [][]string) table.Table { return tbl } -func NewRunningScenariosTable(runningScenarios []orchestratormodels.RunningScenario) table.Table { +func NewRunningScenariosTable(runningScenarios []orchestratormodels.ScenarioContainer) table.Table { tbl := table.New("Scenario ID", "Scenario Name", "Running Since", "Container Name") tbl.WithHeaderFormatter(headerFmt).WithFirstColumnFormatter(columnFmt) for i, v := range runningScenarios { diff --git a/cmd/utils.go b/cmd/utils.go index 0064de5..5e7828a 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -40,7 +40,7 @@ func GetProvider(offline bool, providerFactory *factory.ProviderFactory) provide } func FetchScenarios(provider provider.ScenarioDataProvider, dataSource string) (*[]string, error) { - scenarios, err := provider.GetScenarios(dataSource) + scenarios, err := provider.GetRegistryImages(dataSource) if err != nil { return nil, err } diff --git a/internal/config/config.go b/internal/config/config.go index 9442d58..74d3f21 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -16,6 +16,7 @@ type Config struct { KubeconfigPrefix string `json:"kubeconfig_prefix"` PodmanDarwinSocketTemplate string `json:"podman_darwin_socket_template"` PodmanLinuxSocketTemplate string `json:"podman_linux_socket_template"` + ContainerExitStatusPrefix string `json:"container_exit_status_prefix"` PodmanSocketRoot string `json:"podman_socket_root_linux"` PodmanRunningState string 
`json:"podman_running_state"` DockerSocketRoot string `json:"docker_socket_root"` diff --git a/internal/config/config.json b/internal/config/config.json index 5cbca4b..fa58d93 100644 --- a/internal/config/config.json +++ b/internal/config/config.json @@ -9,6 +9,7 @@ "krknctl_logs": "krknct-log", "podman_darwin_socket_template": "unix://%s/.local/share/containers/podman/machine/podman.sock", "podman_linux_socket_template": "unix://run/user/%d/podman/podman.sock", + "container_exit_status_prefix": "!#KRKN_EXIT_STATUS", "podman_socket_root_linux": "unix://run/podman/podman.sock", "podman_running_state": "running", "docker_socket_root": "unix:///var/run/docker.sock", diff --git a/main.go b/main.go index 036448a..fdaad08 100644 --- a/main.go +++ b/main.go @@ -24,7 +24,11 @@ func main() { os.Exit(1) } - scenarioOrchestrator := scenarioOrchestratorFactory.NewInstance(*detectedRuntime, &config) + scenarioOrchestrator := scenarioOrchestratorFactory.NewInstance(*detectedRuntime) + if scenarioOrchestrator == nil { + fmt.Printf("%s\n", color.New(color.FgHiRed).Sprint("failed to build scenario orchestrator instance")) + os.Exit(1) + } providerFactory := providerfactory.NewProviderFactory(&config) cmd.Execute(providerFactory, &scenarioOrchestrator, config) diff --git a/pkg/provider/factory/provide_factory_test.go b/pkg/provider/factory/provide_factory_test.go new file mode 100644 index 0000000..469b083 --- /dev/null +++ b/pkg/provider/factory/provide_factory_test.go @@ -0,0 +1,30 @@ +package factory + +import ( + "github.com/krkn-chaos/krknctl/internal/config" + "github.com/krkn-chaos/krknctl/pkg/provider" + "github.com/krkn-chaos/krknctl/pkg/provider/offline" + "github.com/krkn-chaos/krknctl/pkg/provider/quay" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestProviderFactory_NewInstance(t *testing.T) { + typeScenarioQuay := &quay.ScenarioProvider{} + typeScenarioOffline := &offline.ScenarioProvider{} + conf, err := config.LoadConfig() + assert.Nil(t, err) + 
assert.NotNil(t, conf) + + factory := NewProviderFactory(&conf) + assert.NotNil(t, factory) + + factoryQuay := factory.NewInstance(provider.Online) + assert.NotNil(t, factoryQuay) + assert.IsType(t, factoryQuay, typeScenarioQuay) + + factoryOffline := factory.NewInstance(provider.Offline) + assert.NotNil(t, factoryOffline) + assert.IsType(t, factoryOffline, typeScenarioOffline) + +} diff --git a/pkg/provider/models/models.go b/pkg/provider/models/models.go index 56cb77e..5a53866 100644 --- a/pkg/provider/models/models.go +++ b/pkg/provider/models/models.go @@ -14,9 +14,9 @@ type ScenarioTag struct { type ScenarioDetail struct { ScenarioTag - Title string `json:"title"` - Description string `json:"description"` - Fields []typing.InputField + Title string `json:"title"` + Description string `json:"description"` + Fields []typing.InputField `json:"fields"` } func (s *ScenarioDetail) GetFieldByName(name string) *typing.InputField { diff --git a/pkg/provider/models/models_test.go b/pkg/provider/models/models_test.go new file mode 100644 index 0000000..19711e6 --- /dev/null +++ b/pkg/provider/models/models_test.go @@ -0,0 +1,106 @@ +package models + +import ( + "encoding/json" + "github.com/krkn-chaos/krknctl/pkg/typing" + "github.com/stretchr/testify/assert" + "testing" +) + +func getScenarioDetail(t *testing.T) ScenarioDetail { + data := ` +{ + "title":"title", + "description":"description", + "fields":[ + { + "name":"testfield_1", + "type":"string", + "description":"test field 1", + "variable":"TESTFIELD_1" + }, + { + "name":"testfield_2", + "type":"number", + "description":"test field 2", + "variable":"TESTFIELD_2" + }, + { + "name":"testfield_3", + "description":"test field 3", + "type":"boolean", + "variable":"TESTFIELD_3" + }, + { + "name":"testfield_4", + "description":"test field 4", + "type":"file", + "variable":"TESTFIELD_4", + "mount_path":"/mnt/test" + } + ] +} +` + scenarioDetail := ScenarioDetail{} + err := json.Unmarshal([]byte(data), &scenarioDetail) + 
assert.Nil(t, err) + return scenarioDetail +} + +func TestScenarioDetail_GetFieldByEnvVar(t *testing.T) { + scenarioDetail := getScenarioDetail(t) + field1 := scenarioDetail.GetFieldByEnvVar("TESTFIELD_1") + assert.NotNil(t, field1) + assert.Equal(t, (*field1).Type, typing.String) + assert.Equal(t, *((*field1).Description), "test field 1") + assert.Equal(t, *((*field1).Variable), "TESTFIELD_1") + field2 := scenarioDetail.GetFieldByEnvVar("TESTFIELD_2") + assert.NotNil(t, field2) + assert.Equal(t, (*field2).Type, typing.Number) + assert.Equal(t, *((*field2).Description), "test field 2") + assert.Equal(t, *((*field2).Variable), "TESTFIELD_2") + field3 := scenarioDetail.GetFieldByEnvVar("TESTFIELD_3") + assert.NotNil(t, field3) + assert.Equal(t, (*field3).Type, typing.Boolean) + assert.Equal(t, *((*field3).Description), "test field 3") + assert.Equal(t, *((*field3).Variable), "TESTFIELD_3") + + nofield := scenarioDetail.GetFieldByName("nofield") + assert.Nil(t, nofield) + +} + +func TestScenarioDetail_GetFieldByName(t *testing.T) { + scenarioDetail := getScenarioDetail(t) + field1 := scenarioDetail.GetFieldByName("testfield_1") + assert.NotNil(t, field1) + assert.Equal(t, (*field1).Type, typing.String) + assert.Equal(t, *((*field1).Description), "test field 1") + assert.Equal(t, *((*field1).Variable), "TESTFIELD_1") + field2 := scenarioDetail.GetFieldByName("testfield_2") + assert.NotNil(t, field2) + assert.Equal(t, (*field2).Type, typing.Number) + assert.Equal(t, *((*field2).Description), "test field 2") + assert.Equal(t, *((*field2).Variable), "TESTFIELD_2") + field3 := scenarioDetail.GetFieldByName("testfield_3") + assert.NotNil(t, field3) + assert.Equal(t, (*field3).Type, typing.Boolean) + assert.Equal(t, *((*field3).Description), "test field 3") + assert.Equal(t, *((*field3).Variable), "TESTFIELD_3") + + nofield := scenarioDetail.GetFieldByName("nofield") + assert.Nil(t, nofield) + +} + +func TestScenarioDetail_GetFileFieldByMountPath(t *testing.T) { + 
scenarioDetail := getScenarioDetail(t) + field4 := scenarioDetail.GetFileFieldByMountPath("/mnt/test") + assert.NotNil(t, field4) + assert.Equal(t, (*field4).Type, typing.File) + assert.Equal(t, *((*field4).Description), "test field 4") + assert.Equal(t, *((*field4).Variable), "TESTFIELD_4") + + nofield := scenarioDetail.GetFieldByName("/mnt/notfound") + assert.Nil(t, nofield) +} diff --git a/pkg/provider/offline/scenario_provider.go b/pkg/provider/offline/scenario_provider.go index 8362072..746af2e 100644 --- a/pkg/provider/offline/scenario_provider.go +++ b/pkg/provider/offline/scenario_provider.go @@ -8,7 +8,7 @@ import ( type ScenarioProvider struct { } -func (p *ScenarioProvider) GetScenarios(dataSource string) (*[]models.ScenarioTag, error) { +func (p *ScenarioProvider) GetRegistryImages(dataSource string) (*[]models.ScenarioTag, error) { return nil, errors.New("not yet implemented") } diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index cbf9c8b..e5bf255 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -10,7 +10,7 @@ const ( ) type ScenarioDataProvider interface { - GetScenarios(dataSource string) (*[]models.ScenarioTag, error) + GetRegistryImages(dataSource string) (*[]models.ScenarioTag, error) GetScenarioDetail(scenario string, dataSource string) (*models.ScenarioDetail, error) ScaffoldScenarios(scenarios []string, dataSource string) (*string, error) } diff --git a/pkg/provider/quay/scenario_provider.go b/pkg/provider/quay/scenario_provider.go index c26afaf..7bd478c 100644 --- a/pkg/provider/quay/scenario_provider.go +++ b/pkg/provider/quay/scenario_provider.go @@ -23,7 +23,7 @@ type ScenarioProvider struct { Config *config.Config } -func (p *ScenarioProvider) GetScenarios(dataSource string) (*[]models.ScenarioTag, error) { +func (p *ScenarioProvider) GetRegistryImages(dataSource string) (*[]models.ScenarioTag, error) { tagBaseUrl, err := url.Parse(dataSource + "/tag") if err != nil { return nil, err @@ -158,7 
+158,7 @@ func (p *ScenarioProvider) ScaffoldScenarios(scenarios []string, dataSource stri } func (p *ScenarioProvider) GetScenarioDetail(scenario string, dataSource string) (*models.ScenarioDetail, error) { - scenarios, err := p.GetScenarios(dataSource) + scenarios, err := p.GetRegistryImages(dataSource) if err != nil { return nil, err } @@ -180,7 +180,7 @@ func (p *ScenarioProvider) GetScenarioDetail(scenario string, dataSource string) if err != nil { return nil, err } - + var deferErr error = nil defer func() { deferErr = resp.Body.Close() diff --git a/pkg/provider/quay/scenario_provider_test.go b/pkg/provider/quay/scenario_provider_test.go index 6c08264..b650da7 100644 --- a/pkg/provider/quay/scenario_provider_test.go +++ b/pkg/provider/quay/scenario_provider_test.go @@ -1,7 +1,6 @@ package quay import ( - "encoding/json" krknctlconfig "github.com/krkn-chaos/krknctl/internal/config" "github.com/stretchr/testify/assert" "strings" @@ -9,83 +8,52 @@ import ( "time" ) -func getTestConfig() krknctlconfig.Config { - data := ` -{ - "version": "0.0.1", - "quay_protocol": "https", - "quay_host": "quay.io", - "quay_org": "krkn-chaos", - "quay_registry": "krknctl-test", - "quay_repositoryApi": "api/v1/repository", - "container_prefix": "krknctl", - "kubeconfig_prefix": "krknctl-kubeconfig", - "krknctl_logs": "krknct-log", - "podman_darwin_socket_template": "unix://%s/.local/share/containers/podman/machine/podman.sock", - "podman_linux_socket_template": "unix://run/user/%d/podman/podman.sock", - "podman_socket_root_linux": "unix://run/podman/podman.sock", - "docker_socket_root": "unix:///var/run/docker.sock", - "default_container_platform": "Podman", - "metrics_profile_path" : "/home/krkn/kraken/config/metrics-aggregated.yaml", - "alerts_profile_path":"/home/krkn/kraken/config/alerts", - "kubeconfig_path": "/home/krkn/.kube/config", - "label_title": "krknctl.title", - "label_description": "krknctl.description", - "label_input_fields": "krknctl.input_fields", - 
"label_title_regex": "LABEL krknctl\\.title=\\\"?(.*)\\\"?", - "label_description_regex": "LABEL krknctl\\.description=\\\"?(.*)\\\"?", - "label_input_fields_regex": "LABEL krknctl\\.input_fields=\\'?(\\[.*\\])\\'?" +func getConfig(t *testing.T) krknctlconfig.Config { + conf, err := krknctlconfig.LoadConfig() + assert.Nil(t, err) + return conf } -` - conf := krknctlconfig.Config{} - _ = json.Unmarshal([]byte(data), &conf) - - _ = krknctlconfig.Config{ - Version: "0.0.1", - QuayHost: "quay.io", - QuayOrg: "krkn-chaos", - QuayRegistry: "krknctl-test", - QuayRepositoryApi: "api/v1/repository", - } +func getTestConfig(t *testing.T) krknctlconfig.Config { + conf := getConfig(t) + conf.QuayRegistry = "krknctl-test" return conf } -func getWrongConfig() krknctlconfig.Config { - return krknctlconfig.Config{ - Version: "0.0.1", - QuayHost: "quay.io", - QuayOrg: "krkn-chaos", - QuayRegistry: "do_not_exist", - QuayRepositoryApi: "api/v1/repository", - } +func getWrongConfig(t *testing.T) krknctlconfig.Config { + conf := getConfig(t) + conf.QuayRegistry = "do_not_exist" + return conf } -func TestQuayScenarioProvider_GetScenarios(t *testing.T) { - config := getTestConfig() +func TestScenarioProvider_GetRegistryImages(t *testing.T) { + config := getTestConfig(t) provider := ScenarioProvider{Config: &config} uri, err := config.GetQuayRepositoryApiUri() assert.NoError(t, err) - scenarios, err := provider.GetScenarios(uri) + scenarios, err := provider.GetRegistryImages(uri) assert.Nil(t, err) - assert.Greater(t, len(*scenarios), 0) - assert.NotEqual(t, (*scenarios)[0].Name, "") - assert.NotEqual(t, (*scenarios)[0].Digest, "") - assert.NotEqual(t, (*scenarios)[0].Size, 0) - assert.NotEqual(t, (*scenarios)[0].LastModified, time.Time{}) assert.NotNil(t, scenarios) + assert.Greater(t, len(*scenarios), 0) + for i, _ := range *scenarios { + assert.NotEqual(t, (*scenarios)[i].Name, "") + assert.NotEqual(t, (*scenarios)[i].Digest, "") + assert.NotEqual(t, (*scenarios)[i].Size, 0) + 
assert.NotEqual(t, (*scenarios)[i].LastModified, time.Time{}) + } - wrongConfig := getWrongConfig() + wrongConfig := getWrongConfig(t) wrongProvider := ScenarioProvider{Config: &wrongConfig} uri, err = wrongConfig.GetQuayRepositoryApiUri() assert.NoError(t, err) - _, err = wrongProvider.GetScenarios(uri) - assert.NotNil(t, err) + _, err = wrongProvider.GetRegistryImages(uri) + assert.Error(t, err) + } func TestQuayScenarioProvider_GetScenarioDetail(t *testing.T) { - config := getTestConfig() + config := getTestConfig(t) provider := ScenarioProvider{Config: &config} uri, err := config.GetQuayRepositoryApiUri() assert.NoError(t, err) @@ -116,14 +84,13 @@ func TestQuayScenarioProvider_GetScenarioDetail(t *testing.T) { } -func TestScenarioProvider_ScaffoldScenarios(t *testing.T) { - - config, _ := krknctlconfig.LoadConfig() +func TestQuayScenarioProvider_ScaffoldScenarios(t *testing.T) { + config := getConfig(t) uri, err := config.GetQuayRepositoryApiUri() assert.NoError(t, err) provider := ScenarioProvider{Config: &config} - scenarios, err := provider.GetScenarios(uri) + scenarios, err := provider.GetRegistryImages(uri) assert.Nil(t, err) assert.NotNil(t, scenarios) var scenarioNames []string diff --git a/pkg/scenario_orchestrator/common_functions.go b/pkg/scenario_orchestrator/common_functions.go index 1428d24..e686ea0 100644 --- a/pkg/scenario_orchestrator/common_functions.go +++ b/pkg/scenario_orchestrator/common_functions.go @@ -87,6 +87,16 @@ func CommonRunAttached(image string, containerName string, env map[string]string return nil, err } } + + containerStatus, err := c.InspectScenario(models.Container{Id: *containerId}, ctx) + if err != nil { + return nil, err + } + // if there is an error exit status it is propagated via error to the cmd + if containerStatus.Container.ExitStatus > 0 { + return containerId, fmt.Errorf("%s %d", c.GetConfig().ContainerExitStatusPrefix, containerStatus.Container.ExitStatus) + } + return containerId, nil } @@ -103,14 +113,14 @@ 
func CommonAttachWait(containerId *string, stdout io.Writer, stderr io.Writer, c return interrupted, err } -func CommonListRunningScenarios(c ScenarioOrchestrator, ctx context.Context) (*[]models.RunningScenario, error) { +func CommonListRunningScenarios(c ScenarioOrchestrator, ctx context.Context) (*[]models.ScenarioContainer, error) { containersMap, err := c.ListRunningContainers(ctx) if err != nil { return nil, err } var indexes []int64 - var runningScenarios []models.RunningScenario + var runningScenarios []models.ScenarioContainer for k, _ := range *containersMap { indexes = append(indexes, k) @@ -119,7 +129,7 @@ func CommonListRunningScenarios(c ScenarioOrchestrator, ctx context.Context) (*[ for _, index := range indexes { container := (*containersMap)[index] - scenario, err := c.InspectRunningScenario(container, nil) + scenario, err := c.InspectScenario(container, nil) if err != nil { return nil, err } diff --git a/pkg/scenario_orchestrator/docker/scenario_orchestrator.go b/pkg/scenario_orchestrator/docker/scenario_orchestrator.go index e157e0f..b342760 100644 --- a/pkg/scenario_orchestrator/docker/scenario_orchestrator.go +++ b/pkg/scenario_orchestrator/docker/scenario_orchestrator.go @@ -293,7 +293,6 @@ func (c *ScenarioOrchestrator) ListRunningContainers(ctx context.Context) (*map[ return nil, err } - // Recuperare i container attualmente in esecuzione containers, err := cli.ContainerList(ctx, dockercontainer.ListOptions{All: true}) if err != nil { return nil, err @@ -319,8 +318,8 @@ func (c *ScenarioOrchestrator) ListRunningContainers(ctx context.Context) (*map[ } -func (c *ScenarioOrchestrator) InspectRunningScenario(container orchestratormodels.Container, ctx context.Context) (*orchestratormodels.RunningScenario, error) { - runningScenario := &orchestratormodels.RunningScenario{} +func (c *ScenarioOrchestrator) InspectScenario(container orchestratormodels.Container, ctx context.Context) (*orchestratormodels.ScenarioContainer, error) { + runningScenario 
:= &orchestratormodels.ScenarioContainer{} scenario := orchestratormodels.Scenario{} scenario.Volumes = make(map[string]string) scenario.Env = make(map[string]string) @@ -334,6 +333,10 @@ func (c *ScenarioOrchestrator) InspectRunningScenario(container orchestratormode if err != nil { return nil, err } + container.Name = inspectData.Name + container.Status = inspectData.State.Status + container.ExitStatus = int32(inspectData.State.ExitCode) + scenarioDetail := providermodels.ScenarioDetail{} scenarioDetail.Digest = inspectData.ContainerJSONBase.Image imageAndTag := strings.Split(inspectData.Config.Image, ":") @@ -389,6 +392,28 @@ func (c *ScenarioOrchestrator) Connect(containerRuntimeUri string) (context.Cont return ctxWithClient, nil } +func (c *ScenarioOrchestrator) GetConfig() config.Config { + return c.Config +} + +func (c *ScenarioOrchestrator) ResolveContainerName(containerName string, ctx context.Context) (*string, error) { + cli, err := dockerClientFromContext(ctx) + if err != nil { + return nil, err + } + + containers, err := cli.ContainerList(ctx, dockercontainer.ListOptions{All: true}) + if err != nil { + return nil, err + } + for _, container := range containers { + if strings.Contains(container.Names[0], containerName) { + return &container.ID, nil + } + } + return nil, nil +} + // common functions func (c *ScenarioOrchestrator) AttachWait(containerId *string, stdout io.Writer, stderr io.Writer, ctx context.Context) (*bool, error) { @@ -414,6 +439,6 @@ func (c *ScenarioOrchestrator) PrintContainerRuntime() { scenario_orchestrator.CommonPrintRuntime(c.ContainerRuntime) } -func (c *ScenarioOrchestrator) ListRunningScenarios(ctx context.Context) (*[]orchestratormodels.RunningScenario, error) { +func (c *ScenarioOrchestrator) ListRunningScenarios(ctx context.Context) (*[]orchestratormodels.ScenarioContainer, error) { return scenario_orchestrator.CommonListRunningScenarios(c, ctx) } diff --git a/pkg/scenario_orchestrator/docker/scenario_orchestrator_test.go 
b/pkg/scenario_orchestrator/docker/scenario_orchestrator_test.go index 2e938b1..27f5bfe 100644 --- a/pkg/scenario_orchestrator/docker/scenario_orchestrator_test.go +++ b/pkg/scenario_orchestrator/docker/scenario_orchestrator_test.go @@ -1,108 +1,66 @@ package docker import ( - "encoding/json" + "context" "fmt" - krknctlconfig "github.com/krkn-chaos/krknctl/internal/config" - "github.com/krkn-chaos/krknctl/pkg/provider/quay" - "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/utils" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/krkn-chaos/krknctl/internal/config" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/models" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/test" "github.com/stretchr/testify/assert" "os" - "os/user" + "regexp" "strconv" + "strings" "testing" ) -func getTestConfig() krknctlconfig.Config { - _ = krknctlconfig.Config{ - Version: "0.0.1", - QuayHost: "quay.io", - QuayOrg: "krkn-chaos", - QuayRegistry: "krkn-hub", - QuayRepositoryApi: "api/v1/repository", - ContainerPrefix: "krknctl-containers", - KubeconfigPrefix: "krknctl-kubeconfig", - PodmanDarwinSocketTemplate: "unix://%s/.local/share/containers/podman/machine/podman.sock", - PodmanLinuxSocketTemplate: "unix:///run/user/%d/podman/podman.sock", - PodmanSocketRoot: "unix:///run/podman/podman.sock", - DockerSocketRoot: "unix:///var/run/docker.sock", - } - - data := ` -{ - "version": "0.0.1", - "quay_protocol": "https", - "quay_host": "quay.io", - "quay_org": "krkn-chaos", - "quay_registry": "krkn-hub", - "quay_repositoryApi": "api/v1/repository", - "container_prefix": "krknctl", - "kubeconfig_prefix": "krknctl-kubeconfig", - "krknctl_logs": "krknct-log", - "podman_darwin_socket_template": "unix://%s/.local/share/containers/podman/machine/podman.sock", - "podman_linux_socket_template": "unix://run/user/%d/podman/podman.sock", - "podman_socket_root_linux": "unix://run/podman/podman.sock", - "podman_running_state": "running", - 
"docker_socket_root": "unix:///var/run/docker.sock", - "docker_running_state": "running", - "default_container_platform": "Podman", - "metrics_profile_path" : "/home/krkn/kraken/config/metrics-aggregated.yaml", - "alerts_profile_path":"/home/krkn/kraken/config/alerts", - "kubeconfig_path": "/home/krkn/.kube/config", - "label_title": "krknctl.title", - "label_description": "krknctl.description", - "label_input_fields": "krknctl.input_fields", - "label_title_regex": "LABEL krknctl\\.title=\\\"?(.*)\\\"?", - "label_description_regex": "LABEL krknctl\\.description=\\\"?(.*)\\\"?", - "label_input_fields_regex": "LABEL krknctl\\.input_fields=\\'?(\\[.*\\])\\'?" +func TestScenarioOrchestrator_Docker_Connect(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorConnect(t, &sopodman, config) } -` - conf := krknctlconfig.Config{} - _ = json.Unmarshal([]byte(data), &conf) - - _ = krknctlconfig.Config{ - Version: "0.0.1", - QuayHost: "quay.io", - QuayOrg: "krkn-chaos", - QuayRegistry: "krknctl-test", - QuayRepositoryApi: "api/v1/repository", - ContainerPrefix: "krknctl", - KubeconfigPrefix: "krknctl-kubeconfig", - PodmanDarwinSocketTemplate: "unix://%s/.local/share/containers/podman/machine/podman.sock", - PodmanLinuxSocketTemplate: "unix://run/user/%d/podman/podman.sock", - PodmanSocketRoot: "unix://run/podman/podman.sock", - } +func TestScenarioOrchestrator_Docker_RunAttached(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorRunAttached(t, &sopodman, config, 3) +} - return conf +func TestScenarioOrchestrator_Docker_Run(t *testing.T) { + config := test.CommonGetConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorRun(t, &sodocker, config, 5) +} +func 
TestScenarioOrchestrator_Docker_RunGraph(t *testing.T) { + config := test.CommonGetConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorRunGraph(t, &sodocker, config) } -func TestscenarioOrchestrator_Run(t *testing.T) { - env := map[string]string{ - "CHAOS_DURATION": "2", - "CORES": "1", - "CPU_PERCENTAGE": "60", - "NAMESPACE": "default", - } - conf := getTestConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - currentUser, err := user.Current() - fmt.Println("Current user: " + (*currentUser).Name) - fmt.Println("current user id" + (*currentUser).Uid) - quayProvider := quay.ScenarioProvider{Config: &conf} - uri, err := conf.GetQuayRepositoryApiUri() +func findContainers(t *testing.T, config config.Config, ctx context.Context) []string { + scenarioNameRegex, err := regexp.Compile(fmt.Sprintf("%s-.*-([0-9]+)", config.ContainerPrefix)) assert.Nil(t, err) - scenario, err := quayProvider.GetScenarioDetail("node-cpu-hog", uri) + + cli, err := dockerClientFromContext(ctx) assert.Nil(t, err) - assert.NotNil(t, scenario) - kubeconfig, err := utils.PrepareKubeconfig(nil, getTestConfig()) + + // Recuperare i container attualmente in esecuzione + containers, err := cli.ContainerList(ctx, dockercontainer.ListOptions{All: true}) assert.Nil(t, err) - assert.NotNil(t, kubeconfig) - fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) + var foundContainers []string + for _, container := range containers { + if scenarioNameRegex.MatchString(container.Names[0]) { + foundContainers = append(foundContainers, container.Names[0]) + } + } + return foundContainers +} +func TestScenarioOrchestrator_Docker_CleanContainers(t *testing.T) { + config := test.CommonGetTestConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} envuid := os.Getenv("USERID") var uid *int = nil if envuid != "" { @@ -111,48 +69,61 @@ func TestscenarioOrchestrator_Run(t *testing.T) { uid = &_uid 
fmt.Println("USERID -> ", *uid) } - socket, err := cm.GetContainerRuntimeSocket(uid) + socket, err := sodocker.GetContainerRuntimeSocket(uid) assert.Nil(t, err) - assert.NotNil(t, socket) - ctx, err := cm.Connect(*socket) + ctx, err := sodocker.Connect(*socket) assert.Nil(t, err) - assert.NotNil(t, ctx) - - fmt.Println("CONTAINER SOCKET -> " + *socket) - containerId, err := cm.RunAttached(uri+":"+scenario.Name, scenario.Name, env, false, map[string]string{}, os.Stdout, os.Stderr, nil, ctx) - if err != nil { - fmt.Println("ERROR -> " + err.Error()) - } + test.CommonTestScenarioOrchestratorRunAttached(t, &sodocker, config, 5) + foundContainers := findContainers(t, config, ctx) + assert.Greater(t, len(foundContainers), 0) + numcontainers, err := sodocker.CleanContainers(ctx) assert.Nil(t, err) - assert.NotNil(t, containerId) + assert.Equal(t, len(foundContainers), *numcontainers) + foundContainers = findContainers(t, config, ctx) + assert.Equal(t, len(foundContainers), 0) + } -func TestScenarioOrchestrator_ListRunningContainers(t *testing.T) { - conf := getTestConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - kubeconfig, err := utils.PrepareKubeconfig(nil, conf) - assert.Nil(t, err) - assert.NotNil(t, kubeconfig) - fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) +func TestScenarioOrchestrator_Docker_AttachWait(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + fileContent := test.CommonAttachWait(t, &sopodman, config) + fmt.Println("FILE CONTENT -> ", fileContent) + assert.True(t, strings.Contains(fileContent, "Release the krkn 4")) - envuid := os.Getenv("USERID") - var uid *int = nil - if envuid != "" { - _uid, err := strconv.Atoi(envuid) - assert.Nil(t, err) - uid = &_uid - fmt.Println("USERID -> ", *uid) - } - socket, err := cm.GetContainerRuntimeSocket(uid) - assert.Nil(t, err) - assert.NotNil(t, socket) - ctx, err := cm.Connect(*socket) - assert.Nil(t, err) - 
assert.NotNil(t, ctx) +} - containers, err := cm.ListRunningContainers(ctx) - assert.Nil(t, err) - assert.NotNil(t, containers) +func TestScenarioOrchestrator_Docker_Kill(t *testing.T) { + +} + +func TestScenarioOrchestrator_Docker_ListRunningContainers(t *testing.T) { + config := test.CommonGetConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorListRunningContainers(t, &sodocker, config) +} + +func TestScenarioOrchestrator_Docker_ListRunningScenarios(t *testing.T) { + +} +func TestScenarioOrchestrator_Docker_InspectRunningScenario(t *testing.T) { + +} + +func TestScenarioOrchestrator_Docker_GetContainerRuntimeSocket(t *testing.T) { + +} + +func TestScenarioOrchestrator_Docker_GetContainerRuntime(t *testing.T) { + +} + +func TestScenarioOrchestrator_Docker_PrintContainerRuntime(t *testing.T) { + +} + +func TestScenarioOrchestrator_Docker_ResolveContainerId(t *testing.T) { + config := test.CommonGetTestConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Docker} + test.CommonTestScenarioOrchestratorResolveContainerName(t, &sodocker, config, 3) } diff --git a/pkg/scenario_orchestrator/factory/factory.go b/pkg/scenario_orchestrator/factory/factory.go index 18a75ea..8175413 100644 --- a/pkg/scenario_orchestrator/factory/factory.go +++ b/pkg/scenario_orchestrator/factory/factory.go @@ -19,27 +19,28 @@ func NewScenarioOrchestratorFactory(config config.Config) *ScenarioOrchestratorF } } -func (f *ScenarioOrchestratorFactory) NewInstance(containerEnvironment models.ContainerRuntime, config *config.Config) scenario_orchestrator.ScenarioOrchestrator { +func (f *ScenarioOrchestratorFactory) NewInstance(containerEnvironment models.ContainerRuntime) scenario_orchestrator.ScenarioOrchestrator { switch containerEnvironment { case models.Podman: + return f.getOrchestratorInstance(models.Podman) case models.Docker: - return f.getOrchestratorInstance(containerEnvironment, 
config) + return f.getOrchestratorInstance(models.Docker) case models.Both: - defaultContainerEnvironment := utils.EnvironmentFromString(config.DefaultContainerPlatform) - return f.getOrchestratorInstance(defaultContainerEnvironment, config) + defaultContainerEnvironment := utils.EnvironmentFromString(f.Config.DefaultContainerPlatform) + return f.getOrchestratorInstance(defaultContainerEnvironment) } return nil } -func (f *ScenarioOrchestratorFactory) getOrchestratorInstance(containerEnvironment models.ContainerRuntime, config *config.Config) scenario_orchestrator.ScenarioOrchestrator { +func (f *ScenarioOrchestratorFactory) getOrchestratorInstance(containerEnvironment models.ContainerRuntime) scenario_orchestrator.ScenarioOrchestrator { if containerEnvironment == models.Podman { return &podman.ScenarioOrchestrator{ - Config: *config, + Config: f.Config, ContainerRuntime: containerEnvironment, } } else { return &docker.ScenarioOrchestrator{ - Config: *config, + Config: f.Config, ContainerRuntime: containerEnvironment, } } diff --git a/pkg/scenario_orchestrator/models/models.go b/pkg/scenario_orchestrator/models/models.go index c30ab32..d8e9508 100644 --- a/pkg/scenario_orchestrator/models/models.go +++ b/pkg/scenario_orchestrator/models/models.go @@ -41,17 +41,19 @@ type Scenario struct { Volumes map[string]string `json:"volumes,omitempty"` } -type RunningScenario struct { +type ScenarioContainer struct { Scenario *Scenario ScenarioDetail *models.ScenarioDetail Container *Container } type Container struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Image string `json:"image,omitempty"` - Started int64 `json:"started,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Image string `json:"image,omitempty"` + Started int64 `json:"started,omitempty"` + Status string `json:"status,omitempty"` + ExitStatus int32 `json:"exit_status"` } type ScenarioSet map[string]ScenarioNode diff --git 
a/pkg/scenario_orchestrator/podman/scenario_orchestrator.go b/pkg/scenario_orchestrator/podman/scenario_orchestrator.go index 50f11ab..67a313a 100644 --- a/pkg/scenario_orchestrator/podman/scenario_orchestrator.go +++ b/pkg/scenario_orchestrator/podman/scenario_orchestrator.go @@ -218,8 +218,8 @@ func (c *ScenarioOrchestrator) ListRunningContainers(ctx context.Context) (*map[ } -func (c *ScenarioOrchestrator) InspectRunningScenario(container orchestratormodels.Container, ctx context.Context) (*orchestratormodels.RunningScenario, error) { - runningScenario := &orchestratormodels.RunningScenario{} +func (c *ScenarioOrchestrator) InspectScenario(container orchestratormodels.Container, ctx context.Context) (*orchestratormodels.ScenarioContainer, error) { + runningScenario := &orchestratormodels.ScenarioContainer{} scenario := orchestratormodels.Scenario{} scenario.Volumes = make(map[string]string) scenario.Env = make(map[string]string) @@ -232,6 +232,10 @@ func (c *ScenarioOrchestrator) InspectRunningScenario(container orchestratormode return nil, fmt.Errorf("container %s not found", container.Id) } + container.Name = inspectData.Name + container.Status = inspectData.State.Status + container.ExitStatus = inspectData.State.ExitCode + if inspectData.Config == nil { return nil, fmt.Errorf("container %s has no config", container.Id) } @@ -283,6 +287,26 @@ func (c *ScenarioOrchestrator) Connect(containerRuntimeUri string) (context.Cont return bindings.NewConnection(context.Background(), containerRuntimeUri) } +func (c *ScenarioOrchestrator) GetConfig() config.Config { + return c.Config +} + +func (c *ScenarioOrchestrator) ResolveContainerName(containerName string, ctx context.Context) (*string, error) { + _true := true + containerList, err := containers.List(ctx, &containers.ListOptions{ + All: &_true, + }) + if err != nil { + return nil, err + } + for _, container := range containerList { + if strings.Contains(container.Names[0], containerName) { + return &container.ID, 
nil + } + } + return nil, nil +} + // common functions func (c *ScenarioOrchestrator) RunAttached(image string, containerName string, env map[string]string, cache bool, volumeMounts map[string]string, stdout io.Writer, stderr io.Writer, commChan *chan *string, ctx context.Context) (*string, error) { @@ -308,6 +332,6 @@ func (c *ScenarioOrchestrator) PrintContainerRuntime() { scenario_orchestrator.CommonPrintRuntime(c.ContainerRuntime) } -func (c *ScenarioOrchestrator) ListRunningScenarios(ctx context.Context) (*[]orchestratormodels.RunningScenario, error) { +func (c *ScenarioOrchestrator) ListRunningScenarios(ctx context.Context) (*[]orchestratormodels.ScenarioContainer, error) { return scenario_orchestrator.CommonListRunningScenarios(c, ctx) } diff --git a/pkg/scenario_orchestrator/podman/scenario_orchestrator_test.go b/pkg/scenario_orchestrator/podman/scenario_orchestrator_test.go index 90bedc7..eab89ad 100644 --- a/pkg/scenario_orchestrator/podman/scenario_orchestrator_test.go +++ b/pkg/scenario_orchestrator/podman/scenario_orchestrator_test.go @@ -1,116 +1,46 @@ package podman import ( - "encoding/json" + "context" "fmt" - krknctlconfig "github.com/krkn-chaos/krknctl/internal/config" - "github.com/krkn-chaos/krknctl/pkg/dependencygraph" - "github.com/krkn-chaos/krknctl/pkg/provider/quay" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/krkn-chaos/krknctl/internal/config" "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/models" - "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/utils" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/test" "github.com/stretchr/testify/assert" "os" - "os/user" + "regexp" "strconv" + "strings" "testing" ) -func getTestConfig() krknctlconfig.Config { - data := ` -{ - "version": "0.0.1", - "quay_protocol": "https", - "quay_host": "quay.io", - "quay_org": "krkn-chaos", - "quay_registry": "krknctl-test", - "quay_repositoryApi": "api/v1/repository", - "container_prefix": "krknctl", - 
"kubeconfig_prefix": "krknctl-kubeconfig", - "krknctl_logs": "krknct-log", - "podman_darwin_socket_template": "unix://%s/.local/share/containers/podman/machine/podman.sock", - "podman_linux_socket_template": "unix://run/user/%d/podman/podman.sock", - "podman_socket_root_linux": "unix://run/podman/podman.sock", - "podman_running_state": "running", - "docker_socket_root": "unix:///var/run/docker.sock", - "docker_running_state": "running", - "default_container_platform": "Podman", - "metrics_profile_path" : "/home/krkn/kraken/config/metrics-aggregated.yaml", - "alerts_profile_path":"/home/krkn/kraken/config/alerts", - "kubeconfig_path": "/home/krkn/.kube/config", - "label_title": "krknctl.title", - "label_description": "krknctl.description", - "label_input_fields": "krknctl.input_fields", - "label_title_regex": "LABEL krknctl\\.title=\\\"?(.*)\\\"?", - "label_description_regex": "LABEL krknctl\\.description=\\\"?(.*)\\\"?", - "label_input_fields_regex": "LABEL krknctl\\.input_fields=\\'?(\\[.*\\])\\'?" 
+func TestScenarioOrchestrator_Podman_Connect(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorConnect(t, &sopodman, config) } -` - conf := krknctlconfig.Config{} - _ = json.Unmarshal([]byte(data), &conf) - return conf +func TestScenarioOrchestrator_Podman_RunAttached(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorRunAttached(t, &sopodman, config, 10) } -func GetOriginalConfig() krknctlconfig.Config { - data := ` -{ - "version": "0.0.1", - "quay_protocol": "https", - "quay_host": "quay.io", - "quay_org": "krkn-chaos", - "quay_registry": "krkn-hub", - "quay_repositoryApi": "api/v1/repository", - "container_prefix": "krknctl", - "kubeconfig_prefix": "krknctl-kubeconfig", - "krknctl_logs": "krknct-log", - "podman_darwin_socket_template": "unix://%s/.local/share/containers/podman/machine/podman.sock", - "podman_linux_socket_template": "unix://run/user/%d/podman/podman.sock", - "podman_socket_root_linux": "unix://run/podman/podman.sock", - "podman_running_state": "running", - "docker_socket_root": "unix:///var/run/docker.sock", - "docker_running_state": "running", - "default_container_platform": "Podman", - "metrics_profile_path" : "/home/krkn/kraken/config/metrics-aggregated.yaml", - "alerts_profile_path":"/home/krkn/kraken/config/alerts", - "kubeconfig_path": "/home/krkn/.kube/config", - "label_title": "krknctl.title", - "label_description": "krknctl.description", - "label_input_fields": "krknctl.input_fields", - "label_title_regex": "LABEL krknctl\\.title=\\\"?(.*)\\\"?", - "label_description_regex": "LABEL krknctl\\.description=\\\"?(.*)\\\"?", - "label_input_fields_regex": "LABEL krknctl\\.input_fields=\\'?(\\[.*\\])\\'?" 
+func TestScenarioOrchestrator_Podman_Run(t *testing.T) { + config := test.CommonGetConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorRun(t, &sopodman, config, 5) } -` - conf := krknctlconfig.Config{} - _ = json.Unmarshal([]byte(data), &conf) - return conf +func TestScenarioOrchestrator_Podman_RunGraph(t *testing.T) { + config := test.CommonGetConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorRunGraph(t, &sopodman, config) } -func TestConnect(t *testing.T) { - env := map[string]string{ - "CHAOS_DURATION": "2", - "CORES": "1", - "CPU_PERCENTAGE": "60", - "NAMESPACE": "default", - } - conf := GetOriginalConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - currentUser, err := user.Current() - fmt.Println("Current user: " + (*currentUser).Name) - fmt.Println("current user id" + (*currentUser).Uid) - quayProvider := quay.ScenarioProvider{Config: &conf} - repositoryApiUri, err := conf.GetQuayRepositoryApiUri() - assert.Nil(t, err) - scenario, err := quayProvider.GetScenarioDetail("node-cpu-hog", repositoryApiUri) - assert.Nil(t, err) - assert.NotNil(t, scenario) - kubeconfig, err := utils.PrepareKubeconfig(nil, getTestConfig()) - assert.Nil(t, err) - assert.NotNil(t, kubeconfig) - fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) +func findContainers(t *testing.T, config config.Config) (int, context.Context) { + _true := true envuid := os.Getenv("USERID") var uid *int = nil if envuid != "" { @@ -119,205 +49,79 @@ func TestConnect(t *testing.T) { uid = &_uid fmt.Println("USERID -> ", *uid) } - socket, err := cm.GetContainerRuntimeSocket(uid) + + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + socket, err := sopodman.GetContainerRuntimeSocket(uid) assert.Nil(t, err) - assert.NotNil(t, socket) - imageUri, err := conf.GetQuayImageUri() + ctx, err := 
sopodman.Connect(*socket) assert.Nil(t, err) - fmt.Println("CONTAINER SOCKET -> " + *socket) - ctx, err := cm.Connect(*socket) + nameRegex, err := regexp.Compile(fmt.Sprintf("^%s.*-[0-9]+$", config.ContainerPrefix)) assert.Nil(t, err) - assert.NotNil(t, ctx) - containerId, err := cm.RunAttached(imageUri+":"+scenario.Name, scenario.Name, env, false, map[string]string{}, os.Stdout, os.Stderr, nil, ctx) - if err != nil { - fmt.Println("ERROR -> " + err.Error()) - } + containers, err := containers.List(ctx, &containers.ListOptions{ + All: &_true, + }) assert.Nil(t, err) - assert.NotNil(t, containerId) - -} - -func TestRunGraph(t *testing.T) { - data := ` -{ - "root":{ - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} - }, - "first-row-1":{ - "depends_on":"root", - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} - }, - "first-row-2":{ - "depends_on":"root", - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} - }, - "second-row":{ - "depends_on":"first-row-1", - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} - }, - "third-row-1":{ - "depends_on":"second-row", - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} - }, - "third-row-2":{ - "depends_on":"second-row", - "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", - "name":"dummy-scenario", - "env":{ - "END":"2" - }, - "volumes":{} + foundContainers := 0 + for _, container := range containers { + if nameRegex.MatchString(container.Names[0]) { + foundContainers++ + } } + return foundContainers, ctx } -` - conf := getTestConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - currentUser, err := user.Current() - 
fmt.Println("Current user: " + (*currentUser).Name) - fmt.Println("current user id" + (*currentUser).Uid) - quayProvider := quay.ScenarioProvider{Config: &conf} - repositoryApi, err := conf.GetQuayRepositoryApiUri() - assert.Nil(t, err) - scenario, err := quayProvider.GetScenarioDetail("dummy-scenario", repositoryApi) - assert.Nil(t, err) - assert.NotNil(t, scenario) - kubeconfig, err := utils.PrepareKubeconfig(nil, getTestConfig()) - assert.Nil(t, err) - assert.NotNil(t, kubeconfig) - fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) - envuid := os.Getenv("USERID") - var uid *int = nil - if envuid != "" { - _uid, err := strconv.Atoi(envuid) - assert.Nil(t, err) - uid = &_uid - fmt.Println("USERID -> ", *uid) - } - socket, err := cm.GetContainerRuntimeSocket(uid) - assert.Nil(t, err) - assert.NotNil(t, socket) - ctx, err := cm.Connect(*socket) - assert.Nil(t, err) - assert.NotNil(t, ctx) - - fmt.Println("CONTAINER SOCKET -> " + *socket) +func TestScenarioOrchestrator_Podman_CleanContainers(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorRunAttached(t, &sopodman, config, 5) + foundContainers, ctx := findContainers(t, config) + assert.Greater(t, foundContainers, 0) + numcontainers, err := sopodman.CleanContainers(ctx) + assert.Nil(t, err) + assert.Equal(t, foundContainers, *numcontainers) + foundContainers, ctx = findContainers(t, config) + assert.Equal(t, foundContainers, 0) +} - nodes := make(map[string]models.ScenarioNode) - err = json.Unmarshal([]byte(data), &nodes) - assert.Nil(t, err) +func TestScenarioOrchestrator_Podman_AttachWait(t *testing.T) { + config := test.CommonGetTestConfig(t) + sopodman := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + fileContent := test.CommonAttachWait(t, &sopodman, config) + fmt.Println("FILE CONTENT -> ", fileContent) + assert.True(t, strings.Contains(fileContent, "Release 
the krkn 4")) - convertedNodes := make(map[string]dependencygraph.ParentProvider, len(nodes)) +} - // Populate the new map - for key, node := range nodes { - // Since ScenarioNode implements ParentProvider, this is valid - convertedNodes[key] = node - } +func TestScenarioOrchestrator_Podman_Kill(t *testing.T) { +} - graph, err := dependencygraph.NewGraphFromNodes(convertedNodes) +func TestScenarioOrchestrator_Podman_ListRunningContainers(t *testing.T) { + config := test.CommonGetConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorListRunningContainers(t, &sodocker, config) +} - assert.Nil(t, err) - assert.NotNil(t, graph) - executionPlan := graph.TopoSortedLayers() - assert.NotNil(t, executionPlan) +func TestScenarioOrchestrator_Podman_ListRunningScenarios(t *testing.T) { - commChannel := make(chan *models.GraphCommChannel) - go func() { - cm.RunGraph(nodes, executionPlan, map[string]string{}, map[string]string{}, false, commChannel, ctx) - }() +} +func TestScenarioOrchestrator_Podman_InspectRunningScenario(t *testing.T) { - for { - c := <-commChannel - if c == nil { - break - } else { - assert.Nil(t, (*c).Err) - fmt.Printf("Running step %d scenario: %s\n", *c.Layer, *c.ScenarioId) - } +} - } +func TestScenarioOrchestrator_Podman_GetContainerRuntimeSocket(t *testing.T) { } -func TestScenarioOrchestrator_ListRunningScenarios(t *testing.T) { - conf := GetOriginalConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - envuid := os.Getenv("USERID") - var uid *int = nil - if envuid != "" { - _uid, err := strconv.Atoi(envuid) - assert.Nil(t, err) - uid = &_uid - fmt.Println("USERID -> ", *uid) - } - socket, err := cm.GetContainerRuntimeSocket(uid) - assert.Nil(t, err) - assert.NotNil(t, socket) - ctx, err := cm.Connect(*socket) - assert.Nil(t, err) - assert.NotNil(t, ctx) - containerMap, err := cm.ListRunningContainers(ctx) - assert.Nil(t, err) - assert.NotNil(t, containerMap) +func 
TestScenarioOrchestrator_Podman_GetContainerRuntime(t *testing.T) { + } -func TestScenarioOrchestrator_GetScenarioDetail(t *testing.T) { - conf := GetOriginalConfig() - cm := ScenarioOrchestrator{ - Config: conf, - } - envuid := os.Getenv("USERID") - var uid *int = nil - if envuid != "" { - _uid, err := strconv.Atoi(envuid) - assert.Nil(t, err) - uid = &_uid - fmt.Println("USERID -> ", *uid) - } +func TestScenarioOrchestrator_Podman_PrintContainerRuntime(t *testing.T) { - socket, err := cm.GetContainerRuntimeSocket(uid) - assert.Nil(t, err) - assert.NotNil(t, socket) - ctx, err := cm.Connect(*socket) - assert.Nil(t, err) - assert.NotNil(t, ctx) - scenarios, err := cm.ListRunningContainers(ctx) - assert.Nil(t, err) - assert.NotNil(t, scenarios) - for _, v := range *scenarios { - containerMap, err := cm.InspectRunningScenario(v, ctx) - assert.Nil(t, err) - assert.NotNil(t, containerMap) - } +} +func TestScenarioOrchestrator_Podman_ResolveContainerId(t *testing.T) { + config := test.CommonGetTestConfig(t) + sodocker := ScenarioOrchestrator{Config: config, ContainerRuntime: models.Podman} + test.CommonTestScenarioOrchestratorResolveContainerName(t, &sodocker, config, 3) } diff --git a/pkg/scenario_orchestrator/scenario_orchestrator.go b/pkg/scenario_orchestrator/scenario_orchestrator.go index dfb38e6..183829e 100644 --- a/pkg/scenario_orchestrator/scenario_orchestrator.go +++ b/pkg/scenario_orchestrator/scenario_orchestrator.go @@ -2,6 +2,7 @@ package scenario_orchestrator import ( "context" + "github.com/krkn-chaos/krknctl/internal/config" orchestrator_models "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/models" "io" "os" @@ -25,11 +26,13 @@ type ScenarioOrchestrator interface { Kill(containerId *string, ctx context.Context) error ListRunningContainers(ctx context.Context) (*map[int64]orchestrator_models.Container, error) - ListRunningScenarios(ctx context.Context) (*[]orchestrator_models.RunningScenario, error) - InspectRunningScenario(container 
orchestrator_models.Container, ctx context.Context) (*orchestrator_models.RunningScenario, error) + ListRunningScenarios(ctx context.Context) (*[]orchestrator_models.ScenarioContainer, error) + InspectScenario(container orchestrator_models.Container, ctx context.Context) (*orchestrator_models.ScenarioContainer, error) GetContainerRuntimeSocket(userId *int) (*string, error) GetContainerRuntime() orchestrator_models.ContainerRuntime PrintContainerRuntime() + GetConfig() config.Config + ResolveContainerName(containerName string, ctx context.Context) (*string, error) } diff --git a/pkg/scenario_orchestrator/test/common_test_functions.go b/pkg/scenario_orchestrator/test/common_test_functions.go new file mode 100644 index 0000000..b278187 --- /dev/null +++ b/pkg/scenario_orchestrator/test/common_test_functions.go @@ -0,0 +1,423 @@ +package test + +import ( + "encoding/json" + "fmt" + krknctlconfig "github.com/krkn-chaos/krknctl/internal/config" + "github.com/krkn-chaos/krknctl/pkg/dependencygraph" + "github.com/krkn-chaos/krknctl/pkg/provider/quay" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/models" + "github.com/krkn-chaos/krknctl/pkg/scenario_orchestrator/utils" + "github.com/letsencrypt/boulder/core" + "github.com/stretchr/testify/assert" + "os" + "os/user" + "strconv" + "testing" + "time" +) + +func CommonGetConfig(t *testing.T) krknctlconfig.Config { + conf, err := krknctlconfig.LoadConfig() + assert.Nil(t, err) + return conf +} + +func CommonGetTestConfig(t *testing.T) krknctlconfig.Config { + conf := CommonGetConfig(t) + conf.QuayRegistry = "krknctl-test" + return conf +} + +func CommonTestScenarioOrchestratorRun(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, conf krknctlconfig.Config, duration int) string { + env := map[string]string{ + "END": fmt.Sprintf("%d", duration), + } + + currentUser, err := user.Current() + fmt.Println("Current user: " + (*currentUser).Name) + 
fmt.Println("current user id" + (*currentUser).Uid) + quayProvider := quay.ScenarioProvider{Config: &conf} + registryUri, err := conf.GetQuayImageUri() + assert.Nil(t, err) + + apiUri, err := conf.GetQuayRepositoryApiUri() + assert.Nil(t, err) + + scenario, err := quayProvider.GetScenarioDetail("dummy-scenario", apiUri) + assert.Nil(t, err) + assert.NotNil(t, scenario) + kubeconfig, err := utils.PrepareKubeconfig(nil, conf) + assert.Nil(t, err) + assert.NotNil(t, kubeconfig) + fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + + fmt.Println("CONTAINER SOCKET -> " + *socket) + timestamp := time.Now().Unix() + containerName := fmt.Sprintf("%s-%s-%d", conf.ContainerPrefix, scenario.Name, timestamp) + containerId, err := so.Run(registryUri+":"+scenario.Name, containerName, env, false, map[string]string{}, nil, ctx) + assert.Nil(t, err) + assert.NotNil(t, containerId) + return *containerId +} + +func CommonTestScenarioOrchestratorRunAttached(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, conf krknctlconfig.Config, duration int) string { + env := map[string]string{ + "END": fmt.Sprintf("%d", duration), + "EXIT_STATUS": "0", + } + + currentUser, err := user.Current() + fmt.Println("Current user: " + (*currentUser).Name) + fmt.Println("current user id" + (*currentUser).Uid) + quayProvider := quay.ScenarioProvider{Config: &conf} + registryUri, err := conf.GetQuayImageUri() + assert.Nil(t, err) + apiUri, err := conf.GetQuayRepositoryApiUri() + assert.Nil(t, err) + scenario, err := quayProvider.GetScenarioDetail("failing-scenario", apiUri) + assert.Nil(t, err) + assert.NotNil(t, scenario) + kubeconfig, 
err := utils.PrepareKubeconfig(nil, conf) + assert.Nil(t, err) + assert.NotNil(t, kubeconfig) + fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + + fmt.Println("CONTAINER SOCKET -> " + *socket) + containerName1 := utils.GenerateContainerName(conf, scenario.Name, nil) + containerId, err := so.RunAttached(registryUri+":"+scenario.Name, containerName1, env, false, map[string]string{}, os.Stdout, os.Stderr, nil, ctx) + if err != nil { + fmt.Println("ERROR -> " + err.Error()) + } + assert.Nil(t, err) + assert.NotNil(t, containerId) + + // Testing exit status > 0 + exitStatus := "3" + env["END"] = fmt.Sprintf("%d", duration) + env["EXIT_STATUS"] = exitStatus + containerName2 := utils.GenerateContainerName(conf, scenario.Name, nil) + containerId, err = so.RunAttached(registryUri+":"+scenario.Name, containerName2, env, false, map[string]string{}, os.Stdout, os.Stderr, nil, ctx) + if err != nil { + fmt.Println("ERROR -> " + err.Error()) + } + assert.NotNil(t, err) + assert.NotNil(t, containerId) + assert.Equal(t, fmt.Sprintf("%s %s", conf.ContainerExitStatusPrefix, exitStatus), err.Error()) + + return *containerId +} + +func CommonTestScenarioOrchestratorConnect(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, config krknctlconfig.Config) { + currentUser, err := user.Current() + fmt.Println("Current user: " + (*currentUser).Name) + fmt.Println("current user id" + (*currentUser).Uid) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := 
so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + assert.Nil(t, err) + fmt.Println("CONTAINER SOCKET -> " + *socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) +} + +func CommonTestScenarioOrchestratorRunGraph(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, config krknctlconfig.Config) { + data := ` +{ + "root":{ + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + }, + "first-row-1":{ + "depends_on":"root", + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + }, + "first-row-2":{ + "depends_on":"root", + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + }, + "second-row":{ + "depends_on":"first-row-1", + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + }, + "third-row-1":{ + "depends_on":"second-row", + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + }, + "third-row-2":{ + "depends_on":"second-row", + "image":"quay.io/krkn-chaos/krknctl-test:dummy-scenario", + "name":"dummy-scenario", + "env":{ + "END":"2" + }, + "volumes":{} + } +} +` + + currentUser, err := user.Current() + fmt.Println("Current user: " + (*currentUser).Name) + fmt.Println("current user id" + (*currentUser).Uid) + quayProvider := quay.ScenarioProvider{Config: &config} + repositoryApi, err := config.GetQuayRepositoryApiUri() + assert.Nil(t, err) + scenario, err := quayProvider.GetScenarioDetail("dummy-scenario", repositoryApi) + assert.Nil(t, err) + assert.NotNil(t, scenario) + kubeconfig, err := utils.PrepareKubeconfig(nil, config) + assert.Nil(t, err) + assert.NotNil(t, kubeconfig) + fmt.Println("KUBECONFIG PARSED -> " + 
*kubeconfig) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + + fmt.Println("CONTAINER SOCKET -> " + *socket) + + nodes := make(map[string]models.ScenarioNode) + err = json.Unmarshal([]byte(data), &nodes) + assert.Nil(t, err) + + convertedNodes := make(map[string]dependencygraph.ParentProvider, len(nodes)) + + // Populate the new map + for key, node := range nodes { + // Since ScenarioNode implements ParentProvider, this is valid + convertedNodes[key] = node + } + + graph, err := dependencygraph.NewGraphFromNodes(convertedNodes) + + assert.Nil(t, err) + assert.NotNil(t, graph) + executionPlan := graph.TopoSortedLayers() + assert.NotNil(t, executionPlan) + + commChannel := make(chan *models.GraphCommChannel) + go func() { + so.RunGraph(nodes, executionPlan, map[string]string{}, map[string]string{}, false, commChannel, ctx) + }() + + for { + c := <-commChannel + if c == nil { + break + } else { + assert.Nil(t, (*c).Err) + fmt.Printf("Running step %d scenario: %s\n", *c.Layer, *c.ScenarioId) + } + + } + +} + +func CommonTestScenarioOrchestratorListRunningContainers(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, config krknctlconfig.Config) { + kubeconfig, err := utils.PrepareKubeconfig(nil, config) + assert.Nil(t, err) + assert.NotNil(t, kubeconfig) + fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + 
assert.NotNil(t, ctx) + + containers, err := so.ListRunningContainers(ctx) + assert.Nil(t, err) + assert.NotNil(t, containers) +} + +func CommonScenarioDetail(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator) { + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + scenarios, err := so.ListRunningContainers(ctx) + assert.Nil(t, err) + assert.NotNil(t, scenarios) + for _, v := range *scenarios { + containerMap, err := so.InspectScenario(v, ctx) + assert.Nil(t, err) + assert.NotNil(t, containerMap) + } +} + +func CommonAttachWait(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, conf krknctlconfig.Config) string { + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + testFilename := fmt.Sprintf("krknctl-attachwait-%s-%d", core.RandomString(5), time.Now().Unix()) + fmt.Println("FILE_NAME -> ", testFilename) + file, err := os.OpenFile(testFilename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + assert.Nil(t, err) + containerId := CommonTestScenarioOrchestratorRunAttached(t, so, conf, 5) + so.AttachWait(&containerId, file, file, ctx) + err = file.Close() + assert.Nil(t, err) + filecontent, err := os.ReadFile(testFilename) + assert.Nil(t, err) + return string(filecontent) +} + +func CommonTestScenarioOrchestratorResolveContainerName(t *testing.T, so scenario_orchestrator.ScenarioOrchestrator, conf krknctlconfig.Config, duration int) { + env := 
map[string]string{ + "END": fmt.Sprintf("%d", duration), + "EXIT_STATUS": "0", + } + + currentUser, err := user.Current() + fmt.Println("Current user: " + (*currentUser).Name) + fmt.Println("current user id" + (*currentUser).Uid) + quayProvider := quay.ScenarioProvider{Config: &conf} + registryUri, err := conf.GetQuayImageUri() + assert.Nil(t, err) + apiUri, err := conf.GetQuayRepositoryApiUri() + assert.Nil(t, err) + scenario, err := quayProvider.GetScenarioDetail("failing-scenario", apiUri) + assert.Nil(t, err) + assert.NotNil(t, scenario) + kubeconfig, err := utils.PrepareKubeconfig(nil, conf) + assert.Nil(t, err) + assert.NotNil(t, kubeconfig) + fmt.Println("KUBECONFIG PARSED -> " + *kubeconfig) + + envuid := os.Getenv("USERID") + var uid *int = nil + if envuid != "" { + _uid, err := strconv.Atoi(envuid) + assert.Nil(t, err) + uid = &_uid + fmt.Println("USERID -> ", *uid) + } + socket, err := so.GetContainerRuntimeSocket(uid) + assert.Nil(t, err) + assert.NotNil(t, socket) + ctx, err := so.Connect(*socket) + assert.Nil(t, err) + assert.NotNil(t, ctx) + + fmt.Println("CONTAINER SOCKET -> " + *socket) + containerName := utils.GenerateContainerName(conf, scenario.Name, nil) + containerId, err := so.RunAttached(registryUri+":"+scenario.Name, containerName, env, false, map[string]string{}, os.Stdout, os.Stderr, nil, ctx) + assert.Nil(t, err) + assert.NotNil(t, containerId) + + resolvedContainerId, err := so.ResolveContainerName(containerName, ctx) + assert.Nil(t, err) + assert.Equal(t, *containerId, *resolvedContainerId) + + resolvedContainerId, err = so.ResolveContainerName("not_found", ctx) + assert.Nil(t, resolvedContainerId) + assert.Nil(t, err) + +} diff --git a/tests/containerfiles/dummyscenario/build.sh b/tests/containerfiles/dummyscenario/build.sh index 39628f6..51ad7e6 100755 --- a/tests/containerfiles/dummyscenario/build.sh +++ b/tests/containerfiles/dummyscenario/build.sh @@ -1,6 +1,6 @@ export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n") envsubst < 
Containerfile.template > Containerfile -podman build . -t quay.io/krkn-chaos/krknctl-test:dummy-scenario +podman build --platform linux/amd64 . -t quay.io/krkn-chaos/krknctl-test:dummy-scenario podman tag quay.io/krkn-chaos/krknctl-test:dummy-scenario quay.io/krkn-chaos/krkn-hub:dummy-scenario podman push quay.io/krkn-chaos/krknctl-test:dummy-scenario podman push quay.io/krkn-chaos/krkn-hub:dummy-scenario \ No newline at end of file diff --git a/tests/containerfiles/failingscenario/Containerfile new file mode 100644 index 0000000..988a49c --- /dev/null +++ b/tests/containerfiles/failingscenario/Containerfile @@ -0,0 +1,10 @@ +FROM registry.access.redhat.com/ubi9-minimal:9.4-1227.1726694542 +RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn +COPY run.sh /home/krkn + +LABEL krknctl.kubeconfig_path="/home/krkn/.kube/config" +LABEL krknctl.title="Failing Scenario" +LABEL krknctl.description="The failing scenario waits for a specified amount of time and then exits with a configurable exit status without introducing any chaos. It is used for testing purposes to simulate a scenario failure."
+LABEL krknctl.input_fields='[ { "name":"duration", "short_description":"Duration", "description":"Sets the duration of the dummy scenario", "variable":"END", "type":"number", "default":"10" }, { "name":"exit", "short_description":"Exit Status", "description":"Sets the exit status of the container", "variable":"EXIT_STATUS", "type":"number", "default":"0" }]' +USER krkn +ENTRYPOINT ["bash", "/home/krkn/run.sh"] \ No newline at end of file diff --git a/tests/containerfiles/failingscenario/Containerfile.template new file mode 100644 index 0000000..54f50dc --- /dev/null +++ b/tests/containerfiles/failingscenario/Containerfile.template @@ -0,0 +1,10 @@ +FROM registry.access.redhat.com/ubi9-minimal:9.4-1227.1726694542 +RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn +COPY run.sh /home/krkn + +LABEL krknctl.kubeconfig_path="/home/krkn/.kube/config" +LABEL krknctl.title="Failing Scenario" +LABEL krknctl.description="The failing scenario waits for a specified amount of time and then exits with a configurable exit status without introducing any chaos. It is used for testing purposes to simulate a scenario failure." +LABEL krknctl.input_fields='$KRKNCTL_INPUT' +USER krkn +ENTRYPOINT ["bash", "/home/krkn/run.sh"] \ No newline at end of file diff --git a/tests/containerfiles/failingscenario/build.sh b/tests/containerfiles/failingscenario/build.sh new file mode 100755 index 0000000..33feedf --- /dev/null +++ b/tests/containerfiles/failingscenario/build.sh @@ -0,0 +1,5 @@ +export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n") +envsubst < Containerfile.template > Containerfile +podman build --platform linux/amd64 .
-t quay.io/krkn-chaos/krknctl-test:failing-scenario +podman tag quay.io/krkn-chaos/krknctl-test:failing-scenario quay.io/krkn-chaos/krkn-hub:failing-scenario +podman push quay.io/krkn-chaos/krknctl-test:failing-scenario diff --git a/tests/containerfiles/failingscenario/run.sh b/tests/containerfiles/failingscenario/run.sh new file mode 100755 index 0000000..90ffd3d --- /dev/null +++ b/tests/containerfiles/failingscenario/run.sh @@ -0,0 +1,8 @@ +[ -z "$END" ] && echo '$END variable not exported' && exit 1 +[ -z "$EXIT_STATUS" ] && echo '$EXIT_STATUS variable not exported' && exit 1 +for i in $(seq 0 $END); do + echo "Release the krkn $i" + sleep 1 +done +echo "EXITING $EXIT_STATUS" +exit $EXIT_STATUS \ No newline at end of file