diff --git a/cli/azd/.vscode/cspell.yaml b/cli/azd/.vscode/cspell.yaml index 2c97ad56cf3..cc23a3fb0de 100644 --- a/cli/azd/.vscode/cspell.yaml +++ b/cli/azd/.vscode/cspell.yaml @@ -35,6 +35,13 @@ overrides: - cloudapp - mediaservices - msecnd + - filename: internal/tracing/fields/fields.go + words: + - azuredeps + - filename: internal/appdetect/java.go + words: + - springframework + - eventhubs - filename: docs/docgen.go words: - alexwolf diff --git a/cli/azd/internal/appdetect/appdetect.go b/cli/azd/internal/appdetect/appdetect.go index e6103d3cbd7..77a362f140b 100644 --- a/cli/azd/internal/appdetect/appdetect.go +++ b/cli/azd/internal/appdetect/appdetect.go @@ -110,6 +110,7 @@ const ( DbPostgres DatabaseDep = "postgres" DbMongo DatabaseDep = "mongo" DbMySql DatabaseDep = "mysql" + DbCosmos DatabaseDep = "cosmos" DbSqlServer DatabaseDep = "sqlserver" DbRedis DatabaseDep = "redis" ) @@ -122,6 +123,8 @@ func (db DatabaseDep) Display() string { return "MongoDB" case DbMySql: return "MySQL" + case DbCosmos: + return "Cosmos DB" case DbSqlServer: return "SQL Server" case DbRedis: @@ -131,6 +134,48 @@ func (db DatabaseDep) Display() string { return "" } +//type AzureDep string + +type AzureDep interface { + ResourceDisplay() string +} + +type AzureDepServiceBus struct { + Queues []string + IsJms bool +} + +func (a AzureDepServiceBus) ResourceDisplay() string { + return "Azure Service Bus" +} + +type AzureDepEventHubs struct { + Names []string + UseKafka bool + SpringBootVersion string +} + +func (a AzureDepEventHubs) ResourceDisplay() string { + return "Azure Event Hubs" +} + +type AzureDepStorageAccount struct { + ContainerNames []string +} + +func (a AzureDepStorageAccount) ResourceDisplay() string { + return "Azure Storage Account" +} + +type SpringCloudAzureDep struct { +} + +func (a SpringCloudAzureDep) ResourceDisplay() string { + return "Spring Cloud Azure Starter" +} + +const UnknownSpringBootVersion string = "unknownSpringBootVersion" + type Project struct { // The language associated with the project. Language Language @@ -141,6 +186,9 @@ type Project struct { // Experimental: Database dependencies inferred through heuristics while scanning dependencies in the project. DatabaseDeps []DatabaseDep + // Experimental: Azure dependencies inferred through heuristics while scanning dependencies in the project. + AzureDeps []AzureDep + // The path to the project directory. 
Path string diff --git a/cli/azd/internal/appdetect/appdetect_test.go b/cli/azd/internal/appdetect/appdetect_test.go index b356a151ace..cba1dc94866 100644 --- a/cli/azd/internal/appdetect/appdetect_test.go +++ b/cli/azd/internal/appdetect/appdetect_test.go @@ -46,8 +46,10 @@ func TestDetect(t *testing.T) { Path: "java-multimodules/application", DetectionRule: "Inferred by presence of: pom.xml", DatabaseDeps: []DatabaseDep{ + DbMongo, DbMySql, DbPostgres, + DbRedis, }, }, { @@ -130,8 +132,10 @@ func TestDetect(t *testing.T) { Path: "java-multimodules/application", DetectionRule: "Inferred by presence of: pom.xml", DatabaseDeps: []DatabaseDep{ + DbMongo, DbMySql, DbPostgres, + DbRedis, }, }, { @@ -163,8 +167,10 @@ func TestDetect(t *testing.T) { Path: "java-multimodules/application", DetectionRule: "Inferred by presence of: pom.xml", DatabaseDeps: []DatabaseDep{ + DbMongo, DbMySql, DbPostgres, + DbRedis, }, }, { @@ -199,8 +205,10 @@ func TestDetect(t *testing.T) { Path: "java-multimodules/application", DetectionRule: "Inferred by presence of: pom.xml", DatabaseDeps: []DatabaseDep{ + DbMongo, DbMySql, DbPostgres, + DbRedis, }, }, { diff --git a/cli/azd/internal/appdetect/java.go b/cli/azd/internal/appdetect/java.go index fe6fec3ea65..1b2b66f624a 100644 --- a/cli/azd/internal/appdetect/java.go +++ b/cli/azd/internal/appdetect/java.go @@ -4,11 +4,13 @@ import ( "context" "encoding/xml" "fmt" + "github.com/azure/azure-dev/cli/azd/internal/tracing" + "github.com/azure/azure-dev/cli/azd/internal/tracing/fields" "io/fs" - "maps" + "log" "os" "path/filepath" - "slices" + "regexp" "strings" ) @@ -23,10 +25,13 @@ func (jd *javaDetector) DetectProject(ctx context.Context, path string, entries []fs.DirEntry) (*Project, error) { for _, entry := range entries { if strings.ToLower(entry.Name()) == "pom.xml" { + tracing.SetUsageAttributes(fields.AppInitJavaDetect.String("start")) pomFile := filepath.Join(path, entry.Name()) project, err := readMavenProject(pomFile) if err != nil { - return nil, fmt.Errorf("error reading pom.xml: %w", err) + log.Printf("Please edit azure.yaml manually to satisfy your requirements; azd cannot "+ + "detect your Java project because an error occurred while reading pom.xml: %s.", err) + return nil, nil } if len(project.Modules) > 0 { @@ -45,15 +50,18 @@ } _ = currentRoot // use currentRoot here in the analysis - result, err := detectDependencies(project, &Project{ + result, err := detectDependencies(currentRoot, project, &Project{ Language: Java, Path: path, DetectionRule: "Inferred by presence of: pom.xml", }) if err != nil { - return nil, fmt.Errorf("detecting dependencies: %w", err) + log.Printf("Please edit azure.yaml manually to satisfy your requirements; azd cannot "+
+ "detect your Java project because an error occurred while detecting dependencies: %s", err) + return nil, nil } + tracing.SetUsageAttributes(fields.AppInitJavaDetect.String("finish")) return result, nil } } @@ -66,6 +74,7 @@ type mavenProject struct { XmlName xml.Name `xml:"project"` Parent parent `xml:"parent"` Modules []string `xml:"modules>module"` // Capture the modules + Properties Properties `xml:"properties"` Dependencies []dependency `xml:"dependencies>dependency"` DependencyManagement dependencyManagement `xml:"dependencyManagement"` Build build `xml:"build"` @@ -79,6 +88,15 @@ type parent struct { Version string `xml:"version"` } +type Properties struct { + Entries []Property `xml:",any"` // Capture all elements inside <properties> +} + +type Property struct { + XMLName xml.Name + Value string `xml:",chardata"` +} + // Dependency represents a single Maven dependency. type dependency struct { GroupId string `xml:"groupId"` @@ -110,8 +128,16 @@ func readMavenProject(filePath string) (*mavenProject, error) { return nil, err } + var initialProject mavenProject + if err := xml.Unmarshal(bytes, &initialProject); err != nil { + return nil, fmt.Errorf("parsing xml: %w", err) + } + + // replace all placeholders with properties + str := replaceAllPlaceholders(initialProject, string(bytes)) + var project mavenProject - if err := xml.Unmarshal(bytes, &project); err != nil { + if err := xml.Unmarshal([]byte(str), &project); err != nil { return nil, fmt.Errorf("parsing xml: %w", err) } @@ -120,24 +146,29 @@ return &project, nil } -func detectDependencies(mavenProject *mavenProject, project *Project) (*Project, error) { - databaseDepMap := map[DatabaseDep]struct{}{} - for _, dep := range mavenProject.Dependencies { - if dep.GroupId == "com.mysql" && dep.ArtifactId == "mysql-connector-j" { - databaseDepMap[DbMySql] = struct{}{} - } +func replaceAllPlaceholders(project mavenProject, input string) string { + propsMap := parseProperties(project.Properties) - if dep.GroupId == "org.postgresql" && dep.ArtifactId == "postgresql" { - databaseDepMap[DbPostgres] = struct{}{} + re := regexp.MustCompile(`\$\{([A-Za-z0-9-_.]+)}`) + return re.ReplaceAllStringFunc(input, func(match string) string { + // Extract the key inside ${} + key := re.FindStringSubmatch(match)[1] + if value, exists := propsMap[key]; exists { + return value } - } + return match + }) +} - if len(databaseDepMap) > 0 { - project.DatabaseDeps = slices.SortedFunc(maps.Keys(databaseDepMap), - func(a, b DatabaseDep) int { - return strings.Compare(string(a), string(b)) - }) +func parseProperties(properties Properties) map[string]string { + result := make(map[string]string) + for _, entry := range properties.Entries { + result[entry.XMLName.Local] = entry.Value } + return result +} +func detectDependencies(currentRoot *mavenProject, mavenProject *mavenProject, project *Project) (*Project, error) { + detectAzureDependenciesByAnalyzingSpringBootProject(currentRoot, mavenProject, project) return project, nil } diff --git a/cli/azd/internal/appdetect/spring_boot.go b/cli/azd/internal/appdetect/spring_boot.go new file mode 100644 index 00000000000..4f97c1ff23b --- /dev/null +++ b/cli/azd/internal/appdetect/spring_boot.go @@ -0,0 +1,347 @@ +package appdetect + +import ( + "fmt" + "log" + "maps" + "slices" + "strings" +) + +type SpringBootProject struct { + springBootVersion string + applicationProperties map[string]string + parentProject *mavenProject + mavenProject
*mavenProject +} + +type DatabaseDependencyRule struct { + databaseDep DatabaseDep + mavenDependencies []MavenDependency +} + +type MavenDependency struct { + groupId string + artifactId string +} + +var databaseDependencyRules = []DatabaseDependencyRule{ + { + databaseDep: DbPostgres, + mavenDependencies: []MavenDependency{ + { + groupId: "org.postgresql", + artifactId: "postgresql", + }, + }, + }, + { + databaseDep: DbMySql, + mavenDependencies: []MavenDependency{ + { + groupId: "com.mysql", + artifactId: "mysql-connector-j", + }, + }, + }, + { + databaseDep: DbRedis, + mavenDependencies: []MavenDependency{ + { + groupId: "org.springframework.boot", + artifactId: "spring-boot-starter-data-redis", + }, + { + groupId: "org.springframework.boot", + artifactId: "spring-boot-starter-data-redis-reactive", + }, + }, + }, + { + databaseDep: DbMongo, + mavenDependencies: []MavenDependency{ + { + groupId: "org.springframework.boot", + artifactId: "spring-boot-starter-data-mongodb", + }, + { + groupId: "org.springframework.boot", + artifactId: "spring-boot-starter-data-mongodb-reactive", + }, + }, + }, + { + databaseDep: DbCosmos, + mavenDependencies: []MavenDependency{ + { + groupId: "com.azure.spring", + artifactId: "spring-cloud-azure-starter-data-cosmos", + }, + }, + }, +} + +func detectAzureDependenciesByAnalyzingSpringBootProject( + parentProject *mavenProject, mavenProject *mavenProject, azdProject *Project) { + if !isSpringBootApplication(mavenProject) { + log.Printf("Skip analyzing spring boot project. path = %s.", mavenProject.path) + return + } + var springBootProject = SpringBootProject{ + springBootVersion: detectSpringBootVersion(parentProject, mavenProject), + applicationProperties: readProperties(azdProject.Path), + parentProject: parentProject, + mavenProject: mavenProject, + } + detectDatabases(azdProject, &springBootProject) + detectServiceBus(azdProject, &springBootProject) + detectEventHubs(azdProject, &springBootProject) + detectStorageAccount(azdProject, &springBootProject) + detectSpringCloudAzure(azdProject, &springBootProject) +} + +func detectDatabases(azdProject *Project, springBootProject *SpringBootProject) { + databaseDepMap := map[DatabaseDep]struct{}{} + for _, rule := range databaseDependencyRules { + for _, targetDependency := range rule.mavenDependencies { + var targetGroupId = targetDependency.groupId + var targetArtifactId = targetDependency.artifactId + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + databaseDepMap[rule.databaseDep] = struct{}{} + logServiceAddedAccordingToMavenDependency(rule.databaseDep.Display(), + targetGroupId, targetArtifactId) + break + } + } + } + if len(databaseDepMap) > 0 { + azdProject.DatabaseDeps = slices.SortedFunc(maps.Keys(databaseDepMap), + func(a, b DatabaseDep) int { + return strings.Compare(string(a), string(b)) + }) + } +} + +func detectServiceBus(azdProject *Project, springBootProject *SpringBootProject) { + // we need to figure out multiple projects are using the same service bus + detectServiceBusAccordingToJMSMavenDependency(azdProject, springBootProject) + detectServiceBusAccordingToSpringCloudStreamBinderMavenDependency(azdProject, springBootProject) +} + +func detectServiceBusAccordingToJMSMavenDependency(azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "com.azure.spring" + var targetArtifactId = "spring-cloud-azure-starter-servicebus-jms" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + newDependency := AzureDepServiceBus{ + IsJms: 
true, + } + azdProject.AzureDeps = append(azdProject.AzureDeps, newDependency) + logServiceAddedAccordingToMavenDependency(newDependency.ResourceDisplay(), targetGroupId, targetArtifactId) + } +} + +func detectServiceBusAccordingToSpringCloudStreamBinderMavenDependency( + azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "com.azure.spring" + var targetArtifactId = "spring-cloud-azure-stream-binder-servicebus" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + bindingDestinations := getBindingDestinationMap(springBootProject.applicationProperties) + var destinations = distinctValues(bindingDestinations) + newDep := AzureDepServiceBus{ + Queues: destinations, + IsJms: false, + } + azdProject.AzureDeps = append(azdProject.AzureDeps, newDep) + logServiceAddedAccordingToMavenDependency(newDep.ResourceDisplay(), targetGroupId, targetArtifactId) + for bindingName, destination := range bindingDestinations { + log.Printf(" Detected Service Bus queue [%s] for binding [%s] by analyzing property file.", + destination, bindingName) + } + } +} + +func detectEventHubs(azdProject *Project, springBootProject *SpringBootProject) { + // we need to figure out multiple projects are using the same event hub + detectEventHubsAccordingToSpringCloudStreamBinderMavenDependency(azdProject, springBootProject) + detectEventHubsAccordingToSpringCloudStreamKafkaMavenDependency(azdProject, springBootProject) +} + +func detectEventHubsAccordingToSpringCloudStreamBinderMavenDependency( + azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "com.azure.spring" + var targetArtifactId = "spring-cloud-azure-stream-binder-eventhubs" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + bindingDestinations := getBindingDestinationMap(springBootProject.applicationProperties) + var destinations = distinctValues(bindingDestinations) + newDep := AzureDepEventHubs{ + Names: destinations, + UseKafka: false, + } + azdProject.AzureDeps = append(azdProject.AzureDeps, newDep) + logServiceAddedAccordingToMavenDependency(newDep.ResourceDisplay(), targetGroupId, targetArtifactId) + for bindingName, destination := range bindingDestinations { + log.Printf(" Detected Event Hub [%s] for binding [%s] by analyzing property file.", + destination, bindingName) + } + } +} + +func detectEventHubsAccordingToSpringCloudStreamKafkaMavenDependency( + azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "org.springframework.cloud" + var targetArtifactId = "spring-cloud-starter-stream-kafka" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + bindingDestinations := getBindingDestinationMap(springBootProject.applicationProperties) + var destinations = distinctValues(bindingDestinations) + newDep := AzureDepEventHubs{ + Names: destinations, + UseKafka: true, + SpringBootVersion: springBootProject.springBootVersion, + } + azdProject.AzureDeps = append(azdProject.AzureDeps, newDep) + logServiceAddedAccordingToMavenDependency(newDep.ResourceDisplay(), targetGroupId, targetArtifactId) + for bindingName, destination := range bindingDestinations { + log.Printf(" Detected Kafka Topic [%s] for binding [%s] by analyzing property file.", + destination, bindingName) + } + } +} + +func detectStorageAccount(azdProject *Project, springBootProject *SpringBootProject) { + detectStorageAccountAccordingToSpringCloudStreamBinderMavenDependencyAndProperty(azdProject, springBootProject) +} + +func 
detectStorageAccountAccordingToSpringCloudStreamBinderMavenDependencyAndProperty( + azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "com.azure.spring" + var targetArtifactId = "spring-cloud-azure-stream-binder-eventhubs" + var targetPropertyName = "spring.cloud.azure.eventhubs.processor.checkpoint-store.container-name" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + bindingDestinations := getBindingDestinationMap(springBootProject.applicationProperties) + containsInBindingName := "" + for bindingName := range bindingDestinations { + if strings.Contains(bindingName, "-in-") { // Example: consume-in-0 + containsInBindingName = bindingName + break + } + } + if containsInBindingName != "" { + targetPropertyValue := springBootProject.applicationProperties[targetPropertyName] + newDep := AzureDepStorageAccount{ + ContainerNames: []string{targetPropertyValue}, + } + azdProject.AzureDeps = append(azdProject.AzureDeps, newDep) + logServiceAddedAccordingToMavenDependencyAndExtraCondition(newDep.ResourceDisplay(), targetGroupId, + targetArtifactId, "binding name ["+containsInBindingName+"] contains '-in-'") + log.Printf(" Detected Storage Account container name: [%s] by analyzing property file.", + targetPropertyValue) + } + } +} + +func detectSpringCloudAzure(azdProject *Project, springBootProject *SpringBootProject) { + var targetGroupId = "com.azure.spring" + var targetArtifactId = "spring-cloud-azure-starter" + if hasDependency(springBootProject, targetGroupId, targetArtifactId) { + newDep := SpringCloudAzureDep{} + azdProject.AzureDeps = append(azdProject.AzureDeps, newDep) + logServiceAddedAccordingToMavenDependency(newDep.ResourceDisplay(), targetGroupId, targetArtifactId) + } +} + +func logServiceAddedAccordingToMavenDependency(resourceName, groupId string, artifactId string) { + logServiceAddedAccordingToMavenDependencyAndExtraCondition(resourceName, groupId, artifactId, "") +} + +func logServiceAddedAccordingToMavenDependencyAndExtraCondition( + resourceName, groupId string, artifactId string, extraCondition string) { + insertedString := "" + extraCondition = strings.TrimSpace(extraCondition) + if extraCondition != "" { + insertedString = " and " + extraCondition + } + log.Printf("Detected '%s' because found dependency '%s:%s' in pom.xml file%s.", + resourceName, groupId, artifactId, insertedString) +} + +func detectSpringBootVersion(currentRoot *mavenProject, mavenProject *mavenProject) string { + // mavenProject prioritize than rootProject + if mavenProject != nil { + return detectSpringBootVersionFromProject(mavenProject) + } else if currentRoot != nil { + return detectSpringBootVersionFromProject(currentRoot) + } + return UnknownSpringBootVersion +} + +func detectSpringBootVersionFromProject(project *mavenProject) string { + if project.Parent.ArtifactId == "spring-boot-starter-parent" { + return project.Parent.Version + } else { + for _, dep := range project.DependencyManagement.Dependencies { + if dep.ArtifactId == "spring-boot-dependencies" { + return dep.Version + } + } + } + return UnknownSpringBootVersion +} + +func isSpringBootApplication(mavenProject *mavenProject) bool { + // how can we tell it's a Spring Boot project? + // 1. It has a parent with a groupId of org.springframework.boot and an artifactId of spring-boot-starter-parent + // 2. 
It has a dependency with a groupId of org.springframework.boot and an artifactId that starts with + // spring-boot-starter + if mavenProject.Parent.GroupId == "org.springframework.boot" && + mavenProject.Parent.ArtifactId == "spring-boot-starter-parent" { + return true + } + for _, dep := range mavenProject.Dependencies { + if dep.GroupId == "org.springframework.boot" && + strings.HasPrefix(dep.ArtifactId, "spring-boot-starter") { + return true + } + } + return false +} + +func distinctValues(input map[string]string) []string { + valueSet := make(map[string]struct{}) + for _, value := range input { + valueSet[value] = struct{}{} + } + + var result []string + for value := range valueSet { + result = append(result, value) + } + + return result +} + +// Function to find all properties that match the pattern `spring.cloud.stream.bindings..destination` +func getBindingDestinationMap(properties map[string]string) map[string]string { + result := make(map[string]string) + + // Iterate through the properties map and look for matching keys + for key, value := range properties { + // Check if the key matches the pattern `spring.cloud.stream.bindings..destination` + if strings.HasPrefix(key, "spring.cloud.stream.bindings.") && strings.HasSuffix(key, ".destination") { + // Extract the binding name + bindingName := key[len("spring.cloud.stream.bindings.") : len(key)-len(".destination")] + // Store the binding name and destination value + result[bindingName] = fmt.Sprintf("%v", value) + } + } + + return result +} + +func hasDependency(project *SpringBootProject, groupId string, artifactId string) bool { + for _, projectDependency := range project.mavenProject.Dependencies { + if projectDependency.GroupId == groupId && projectDependency.ArtifactId == artifactId { + return true + } + } + return false +} diff --git a/cli/azd/internal/appdetect/spring_boot_property.go b/cli/azd/internal/appdetect/spring_boot_property.go new file mode 100644 index 00000000000..95cbcde6248 --- /dev/null +++ b/cli/azd/internal/appdetect/spring_boot_property.go @@ -0,0 +1,124 @@ +package appdetect + +import ( + "bufio" + "fmt" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" + "github.com/braydonk/yaml" + "log" + "os" + "path/filepath" + "strings" +) + +func readProperties(projectPath string) map[string]string { + // todo: do we need to consider the bootstrap.properties + result := make(map[string]string) + readPropertiesInPropertiesFile(filepath.Join(projectPath, "/src/main/resources/application.properties"), result) + readPropertiesInYamlFile(filepath.Join(projectPath, "/src/main/resources/application.yml"), result) + readPropertiesInYamlFile(filepath.Join(projectPath, "/src/main/resources/application.yaml"), result) + profile, profileSet := result["spring.profiles.active"] + if profileSet { + readPropertiesInPropertiesFile( + filepath.Join(projectPath, "/src/main/resources/application-"+profile+".properties"), result) + readPropertiesInYamlFile(filepath.Join(projectPath, "/src/main/resources/application-"+profile+".yml"), result) + readPropertiesInYamlFile(filepath.Join(projectPath, "/src/main/resources/application-"+profile+".yaml"), result) + } + return result +} + +func readPropertiesInYamlFile(yamlFilePath string, result map[string]string) { + if !osutil.FileExists(yamlFilePath) { + return + } + data, err := os.ReadFile(yamlFilePath) + if err != nil { + log.Fatalf("error reading YAML file: %v", err) + return + } + + // Parse the YAML into a yaml.Node + var root yaml.Node + err = yaml.Unmarshal(data, &root) + if err != 
nil { + log.Fatalf("error unmarshalling YAML: %v", err) + return + } + + parseYAML("", &root, result) +} + +// Recursively parse the YAML and build dot-separated keys into a map +func parseYAML(prefix string, node *yaml.Node, result map[string]string) { + switch node.Kind { + case yaml.DocumentNode: + // Process each document's content + for _, contentNode := range node.Content { + parseYAML(prefix, contentNode, result) + } + case yaml.MappingNode: + // Process key-value pairs in a map + for i := 0; i < len(node.Content); i += 2 { + keyNode := node.Content[i] + valueNode := node.Content[i+1] + + // Ensure the key is a scalar + if keyNode.Kind != yaml.ScalarNode { + continue + } + + keyStr := keyNode.Value + newPrefix := keyStr + if prefix != "" { + newPrefix = prefix + "." + keyStr + } + parseYAML(newPrefix, valueNode, result) + } + case yaml.SequenceNode: + // Process items in a sequence (list) + for i, item := range node.Content { + newPrefix := fmt.Sprintf("%s[%d]", prefix, i) + parseYAML(newPrefix, item, result) + } + case yaml.ScalarNode: + // If it's a scalar value, add it to the result map + result[prefix] = getEnvironmentVariablePlaceholderHandledValue(node.Value) + default: + // Handle other node types if necessary + } +} + +func readPropertiesInPropertiesFile(propertiesFilePath string, result map[string]string) { + if !osutil.FileExists(propertiesFilePath) { + return + } + file, err := os.Open(propertiesFilePath) + if err != nil { + log.Fatalf("error opening properties file: %v", err) + return + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) == "" || strings.HasPrefix(line, "#") { + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + key := strings.TrimSpace(parts[0]) + value := getEnvironmentVariablePlaceholderHandledValue(parts[1]) + result[key] = value + } + } +} + +func getEnvironmentVariablePlaceholderHandledValue(rawValue string) string { + trimmedRawValue := strings.TrimSpace(rawValue) + if strings.HasPrefix(trimmedRawValue, "${") && strings.HasSuffix(trimmedRawValue, "}") { + envVar := trimmedRawValue[2 : len(trimmedRawValue)-1] + return os.Getenv(envVar) + } + return trimmedRawValue +} diff --git a/cli/azd/internal/appdetect/spring_boot_property_test.go b/cli/azd/internal/appdetect/spring_boot_property_test.go new file mode 100644 index 00000000000..922bd17503e --- /dev/null +++ b/cli/azd/internal/appdetect/spring_boot_property_test.go @@ -0,0 +1,70 @@ +package appdetect + +import ( + "github.com/stretchr/testify/require" + "os" + "path/filepath" + "testing" +) + +func TestReadProperties(t *testing.T) { + var properties = readProperties(filepath.Join("testdata", "java-spring", "project-one")) + require.Equal(t, "", properties["not.exist"]) + require.Equal(t, "jdbc:h2:mem:testdb", properties["spring.datasource.url"]) + + properties = readProperties(filepath.Join("testdata", "java-spring", "project-two")) + require.Equal(t, "", properties["not.exist"]) + require.Equal(t, "jdbc:h2:mem:testdb", properties["spring.datasource.url"]) + + properties = readProperties(filepath.Join("testdata", "java-spring", "project-three")) + require.Equal(t, "", properties["not.exist"]) + require.Equal(t, "HTML", properties["spring.thymeleaf.mode"]) + + properties = readProperties(filepath.Join("testdata", "java-spring", "project-four")) + require.Equal(t, "", properties["not.exist"]) + require.Equal(t, "mysql", properties["database"]) +} + +func 
TestGetEnvironmentVariablePlaceholderHandledValue(t *testing.T) { + tests := []struct { + name string + inputValue string + environmentVariables map[string]string + expectedValue string + }{ + { + "No environment variable placeholder", + "valueOne", + map[string]string{}, + "valueOne", + }, + { + "Has invalid environment variable placeholder", + "${VALUE_ONE", + map[string]string{}, + "${VALUE_ONE", + }, + { + "Has valid environment variable placeholder, but environment variable not set", + "${VALUE_TWO}", + map[string]string{}, + "", + }, + { + "Has valid environment variable placeholder, and environment variable set", + "${VALUE_THREE}", + map[string]string{"VALUE_THREE": "valueThree"}, + "valueThree", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.environmentVariables { + err := os.Setenv(k, v) + require.NoError(t, err) + } + handledValue := getEnvironmentVariablePlaceholderHandledValue(tt.inputValue) + require.Equal(t, tt.expectedValue, handledValue) + }) + } +} diff --git a/cli/azd/internal/appdetect/spring_boot_test.go b/cli/azd/internal/appdetect/spring_boot_test.go new file mode 100644 index 00000000000..23b6517d222 --- /dev/null +++ b/cli/azd/internal/appdetect/spring_boot_test.go @@ -0,0 +1,128 @@ +package appdetect + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDetectSpringBootVersion(t *testing.T) { + tests := []struct { + name string + currentRoot *mavenProject + project *mavenProject + expectedVersion string + }{ + { + "unknown", + nil, + nil, + UnknownSpringBootVersion, + }, + { + "project.parent", + nil, + &mavenProject{ + Parent: parent{ + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-starter-parent", + Version: "2.x", + }, + }, + "2.x", + }, + { + "project.dependencyManagement", + nil, + &mavenProject{ + DependencyManagement: dependencyManagement{ + Dependencies: []dependency{ + { + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-dependencies", + Version: "2.x", + }, + }, + }, + }, + "2.x", + }, + { + "root.parent", + &mavenProject{ + Parent: parent{ + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-starter-parent", + Version: "3.x", + }, + }, + nil, + "3.x", + }, + { + "root.dependencyManagement", + &mavenProject{ + DependencyManagement: dependencyManagement{ + Dependencies: []dependency{ + { + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-dependencies", + Version: "3.x", + }, + }, + }, + }, + nil, + "3.x", + }, + { + "both.root.and.project.parent", + &mavenProject{ + Parent: parent{ + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-starter-parent", + Version: "2.x", + }, + }, + &mavenProject{ + Parent: parent{ + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-starter-parent", + Version: "3.x", + }, + }, + "3.x", + }, + { + "both.root.and.project.dependencyManagement", + &mavenProject{ + DependencyManagement: dependencyManagement{ + Dependencies: []dependency{ + { + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-dependencies", + Version: "2.x", + }, + }, + }, + }, + &mavenProject{ + DependencyManagement: dependencyManagement{ + Dependencies: []dependency{ + { + GroupId: "org.springframework.boot", + ArtifactId: "spring-boot-dependencies", + Version: "3.x", + }, + }, + }, + }, + "3.x", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + version := detectSpringBootVersion(tt.currentRoot, tt.project) + assert.Equal(t, tt.expectedVersion, version) + }) 
+ } +} diff --git a/cli/azd/internal/appdetect/testdata/java-multimodules/application/pom.xml b/cli/azd/internal/appdetect/testdata/java-multimodules/application/pom.xml index e4ddaa858b5..a63cc042486 100644 --- a/cli/azd/internal/appdetect/testdata/java-multimodules/application/pom.xml +++ b/cli/azd/internal/appdetect/testdata/java-multimodules/application/pom.xml @@ -38,6 +38,16 @@ com.mysql mysql-connector-j + + + org.springframework.boot + spring-boot-starter-data-redis + + + + org.springframework.boot + spring-boot-starter-data-mongodb + org.postgresql diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-mysql.properties b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-mysql.properties new file mode 100644 index 00000000000..33ec21d3c95 --- /dev/null +++ b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-mysql.properties @@ -0,0 +1,7 @@ +# database init, supports mysql too +database=mysql +spring.datasource.url=jdbc:mysql://${MYSQL_HOST:localhost}:${MYSQL_PORT:3306}/${MYSQL_DATABASE:petclinic} +spring.datasource.username=${MYSQL_USERNAME:petclinic} +spring.datasource.password=${MYSQL_PASSWORD:} +# SQL is written to be idempotent so this is safe +spring.sql.init.mode=always diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-postgres.properties b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-postgres.properties new file mode 100644 index 00000000000..7d9676e3aad --- /dev/null +++ b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application-postgres.properties @@ -0,0 +1,6 @@ +database=postgres +spring.datasource.url=jdbc:postgresql://${POSTGRES_HOST:localhost}:${POSTGRES_HOST:5432}/${POSTGRES_DATABASE:petclinic} +spring.datasource.username=${POSTGRES_USERNAME:petclinic} +spring.datasource.password=${POSTGRES_PASSWORD:} +# SQL is written to be idempotent so this is safe +spring.sql.init.mode=always diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application.properties b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application.properties new file mode 100644 index 00000000000..59d5368e73c --- /dev/null +++ b/cli/azd/internal/appdetect/testdata/java-spring/project-four/src/main/resources/application.properties @@ -0,0 +1,29 @@ +# database init, supports mysql too +database=h2 +spring.sql.init.schema-locations=classpath*:db/${database}/schema.sql +spring.sql.init.data-locations=classpath*:db/${database}/data.sql + +# Web +spring.thymeleaf.mode=HTML + +# JPA +spring.jpa.hibernate.ddl-auto=none +spring.jpa.open-in-view=true + +# Internationalization +spring.messages.basename=messages/messages + +spring.profiles.active=mysql + +# Actuator +management.endpoints.web.exposure.include=* + +# Logging +logging.level.org.springframework=INFO +# logging.level.org.springframework.web=DEBUG +# logging.level.org.springframework.context.annotation=TRACE + +# Maximum time static resources should be cached +spring.web.resources.cache.cachecontrol.max-age=12h + +server.port=8081 diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-one/src/main/resources/application.yml b/cli/azd/internal/appdetect/testdata/java-spring/project-one/src/main/resources/application.yml new file mode 100644 index 00000000000..09d0cc057c5 --- /dev/null +++ 
b/cli/azd/internal/appdetect/testdata/java-spring/project-one/src/main/resources/application.yml @@ -0,0 +1,12 @@ +spring: + datasource: + url: jdbc:h2:mem:testdb + jackson: + date-format: com.microsoft.azure.simpletodo.configuration.RFC3339DateFormat + serialization: + write-dates-as-timestamps: false + jpa: + hibernate: + ddl-auto: update + show-sql: true + diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-three/src/main/resources/application.properties b/cli/azd/internal/appdetect/testdata/java-spring/project-three/src/main/resources/application.properties new file mode 100644 index 00000000000..59d5368e73c --- /dev/null +++ b/cli/azd/internal/appdetect/testdata/java-spring/project-three/src/main/resources/application.properties @@ -0,0 +1,29 @@ +# database init, supports mysql too +database=h2 +spring.sql.init.schema-locations=classpath*:db/${database}/schema.sql +spring.sql.init.data-locations=classpath*:db/${database}/data.sql + +# Web +spring.thymeleaf.mode=HTML + +# JPA +spring.jpa.hibernate.ddl-auto=none +spring.jpa.open-in-view=true + +# Internationalization +spring.messages.basename=messages/messages + +spring.profiles.active=mysql + +# Actuator +management.endpoints.web.exposure.include=* + +# Logging +logging.level.org.springframework=INFO +# logging.level.org.springframework.web=DEBUG +# logging.level.org.springframework.context.annotation=TRACE + +# Maximum time static resources should be cached +spring.web.resources.cache.cachecontrol.max-age=12h + +server.port=8081 diff --git a/cli/azd/internal/appdetect/testdata/java-spring/project-two/src/main/resources/application.yaml b/cli/azd/internal/appdetect/testdata/java-spring/project-two/src/main/resources/application.yaml new file mode 100644 index 00000000000..09d0cc057c5 --- /dev/null +++ b/cli/azd/internal/appdetect/testdata/java-spring/project-two/src/main/resources/application.yaml @@ -0,0 +1,12 @@ +spring: + datasource: + url: jdbc:h2:mem:testdb + jackson: + date-format: com.microsoft.azure.simpletodo.configuration.RFC3339DateFormat + serialization: + write-dates-as-timestamps: false + jpa: + hibernate: + ddl-auto: update + show-sql: true + diff --git a/cli/azd/internal/auth_type.go b/cli/azd/internal/auth_type.go new file mode 100644 index 00000000000..72fcc331580 --- /dev/null +++ b/cli/azd/internal/auth_type.go @@ -0,0 +1,29 @@ +package internal + +// AuthType defines different authentication types. 
+type AuthType string + +const ( + AuthTypeUnspecified AuthType = "UNSPECIFIED" + // Username and password, or key based authentication + AuthTypePassword AuthType = "PASSWORD" + // Connection string authentication + AuthTypeConnectionString AuthType = "CONNECTION_STRING" + // Microsoft Entra ID token credential + AuthTypeUserAssignedManagedIdentity AuthType = "USER_ASSIGNED_MANAGED_IDENTITY" +) + +func GetAuthTypeDescription(authType AuthType) string { + switch authType { + case AuthTypeUnspecified: + return "Unspecified" + case AuthTypePassword: + return "Username and password" + case AuthTypeConnectionString: + return "Connection string" + case AuthTypeUserAssignedManagedIdentity: + return "User assigned managed identity" + default: + return "Unspecified" + } +} diff --git a/cli/azd/internal/cmd/add/add_configure.go b/cli/azd/internal/cmd/add/add_configure.go index fac15c5a0a8..f3e5ad18c0a 100644 --- a/cli/azd/internal/cmd/add/add_configure.go +++ b/cli/azd/internal/cmd/add/add_configure.go @@ -56,7 +56,9 @@ func fillDatabaseName( for { dbName, err := console.Prompt(ctx, input.ConsoleOptions{ - Message: fmt.Sprintf("Input the name of the app database (%s)", r.Type.String()), + Message: fmt.Sprintf("Input the database name for %s "+ + "(the database name, not the server name; for example, 'databaseName' in "+ + "'jdbc:mysql://databaseServerName:3306/databaseName'):", r.Type.String()), Help: "Hint: App database name\n\n" + "Name of the database that the app connects to. " + "This database will be created after running azd provision or azd up.", diff --git a/cli/azd/internal/repository/app_init.go b/cli/azd/internal/repository/app_init.go index cb211bc14b1..b080c252e07 100644 --- a/cli/azd/internal/repository/app_init.go +++ b/cli/azd/internal/repository/app_init.go @@ -2,6 +2,7 @@ package repository import ( "context" + "errors" "fmt" "maps" "os" @@ -39,11 +40,19 @@ var LanguageMap = map[appdetect.Language]project.ServiceLanguageKind{ var dbMap = map[appdetect.DatabaseDep]struct{}{ appdetect.DbMongo: {}, appdetect.DbPostgres: {}, + appdetect.DbMySql: {}, + appdetect.DbCosmos: {}, appdetect.DbRedis: {}, } var featureCompose = alpha.MustFeatureKey("compose") +var azureDepMap = map[string]struct{}{ + appdetect.AzureDepServiceBus{}.ResourceDisplay(): {}, + appdetect.AzureDepEventHubs{}.ResourceDisplay(): {}, + appdetect.AzureDepStorageAccount{}.ResourceDisplay(): {}, +} + // InitFromApp initializes the infra directory and project file from the current existing app.
func (i *Initializer) InitFromApp( ctx context.Context, @@ -120,10 +129,41 @@ func (i *Initializer) InitFromApp( i.console.StopSpinner(ctx, title, input.StepDone) var prjAppHost []appdetect.Project - for _, prj := range projects { + for index, prj := range projects { if prj.Language == appdetect.DotNetAppHost { prjAppHost = append(prjAppHost, prj) } + + if prj.Language == appdetect.Java { + var hasKafkaDep bool + var hasSpringCloudAzureDep bool + for depIndex, dep := range prj.AzureDeps { + if eventHubs, ok := dep.(appdetect.AzureDepEventHubs); ok && eventHubs.UseKafka { + hasKafkaDep = true + springBootVersion := eventHubs.SpringBootVersion + + if springBootVersion == appdetect.UnknownSpringBootVersion { + var err error + springBootVersion, err = promptSpringBootVersion(i.console, ctx) + if err != nil { + return err + } + eventHubs.SpringBootVersion = springBootVersion + prj.AzureDeps[depIndex] = eventHubs + } + } + if _, ok := dep.(appdetect.SpringCloudAzureDep); ok { + hasSpringCloudAzureDep = true + } + } + + if hasKafkaDep && !hasSpringCloudAzureDep { + err := processSpringCloudAzureDepByPrompt(i.console, ctx, &projects[index]) + if err != nil { + return err + } + } + } } if len(prjAppHost) > 1 { @@ -268,7 +308,7 @@ func (i *Initializer) InitFromApp( title = "Generating " + output.WithHighLightFormat("./"+azdcontext.ProjectFileName) i.console.ShowSpinner(ctx, title, input.Step) - err = i.genProjectFile(ctx, azdCtx, detect, composeEnabled) + err = i.genProjectFile(ctx, azdCtx, detect, infraSpec, composeEnabled) if err != nil { i.console.StopSpinner(ctx, title, input.GetStepResultFormat(err)) return err @@ -362,8 +402,9 @@ func (i *Initializer) genProjectFile( ctx context.Context, azdCtx *azdcontext.AzdContext, detect detectConfirm, + spec *scaffold.InfraSpec, addResources bool) error { - config, err := i.prjConfigFromDetect(ctx, azdCtx.ProjectDirectory(), detect, addResources) + config, err := i.prjConfigFromDetect(ctx, azdCtx.ProjectDirectory(), detect, spec, addResources) if err != nil { return fmt.Errorf("converting config: %w", err) } @@ -384,13 +425,15 @@ func (i *Initializer) prjConfigFromDetect( ctx context.Context, root string, detect detectConfirm, + spec *scaffold.InfraSpec, addResources bool) (project.ProjectConfig, error) { config := project.ProjectConfig{ Name: azdcontext.ProjectName(root), Metadata: &project.ProjectMetadata{ Template: fmt.Sprintf("%s@%s", InitGenTemplateId, internal.VersionInfo().Version), }, - Services: map[string]*project.ServiceConfig{}, + Services: map[string]*project.ServiceConfig{}, + Resources: map[string]*project.ResourceConfig{}, } svcMapping := map[string]string{} @@ -400,6 +443,98 @@ func (i *Initializer) prjConfigFromDetect( return config, err } + if !addResources { + for _, db := range prj.DatabaseDeps { + switch db { + case appdetect.DbMongo: + config.Resources["mongo"] = &project.ResourceConfig{ + Type: project.ResourceTypeDbMongo, + Name: spec.DbCosmosMongo.DatabaseName, + Props: project.MongoDBProps{ + DatabaseName: spec.DbCosmosMongo.DatabaseName, + }, + } + case appdetect.DbPostgres: + config.Resources["postgres"] = &project.ResourceConfig{ + Type: project.ResourceTypeDbPostgres, + Name: spec.DbPostgres.DatabaseName, + Props: project.PostgresProps{ + DatabaseName: spec.DbPostgres.DatabaseName, + AuthType: spec.DbPostgres.AuthType, + }, + } + case appdetect.DbMySql: + config.Resources["mysql"] = &project.ResourceConfig{ + Type: project.ResourceTypeDbMySQL, + Props: project.MySQLProps{ + DatabaseName: spec.DbMySql.DatabaseName, + 
AuthType: spec.DbMySql.AuthType, + }, + } + case appdetect.DbRedis: + config.Resources["redis"] = &project.ResourceConfig{ + Type: project.ResourceTypeDbRedis, + } + case appdetect.DbCosmos: + cosmosDBProps := project.CosmosDBProps{ + DatabaseName: spec.DbCosmos.DatabaseName, + } + for _, container := range spec.DbCosmos.Containers { + cosmosDBProps.Containers = append(cosmosDBProps.Containers, project.CosmosDBContainerProps{ + ContainerName: container.ContainerName, + PartitionKeyPaths: container.PartitionKeyPaths, + }) + } + config.Resources["cosmos"] = &project.ResourceConfig{ + Type: project.ResourceTypeDbCosmos, + Props: cosmosDBProps, + } + } + + } + for _, azureDep := range prj.AzureDeps { + switch azureDep.(type) { + case appdetect.AzureDepServiceBus: + config.Resources["servicebus"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingServiceBus, + Props: project.ServiceBusProps{ + Queues: spec.AzureServiceBus.Queues, + IsJms: spec.AzureServiceBus.IsJms, + AuthType: spec.AzureServiceBus.AuthType, + }, + } + case appdetect.AzureDepEventHubs: + if spec.AzureEventHubs.UseKafka { + config.Resources["kafka"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingKafka, + Props: project.KafkaProps{ + Topics: spec.AzureEventHubs.EventHubNames, + AuthType: spec.AzureEventHubs.AuthType, + SpringBootVersion: spec.AzureEventHubs.SpringBootVersion, + }, + } + } else { + config.Resources["eventhubs"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingEventHubs, + Props: project.EventHubsProps{ + EventHubNames: spec.AzureEventHubs.EventHubNames, + AuthType: spec.AzureEventHubs.AuthType, + }, + } + } + case appdetect.AzureDepStorageAccount: + config.Resources["storage"] = &project.ResourceConfig{ + Type: project.ResourceTypeStorage, + Props: project.StorageProps{ + Containers: spec.AzureStorageAccount.ContainerNames, + AuthType: spec.AzureStorageAccount.AuthType, + }, + } + + } + } + } + config.Services[svc.Name] = &svc svcMapping[prj.Path] = svc.Name } @@ -414,45 +549,135 @@ func (i *Initializer) prjConfigFromDetect( }) for _, database := range databases { + var resourceConfig project.ResourceConfig + var databaseName string if database == appdetect.DbRedis { - redis := project.ResourceConfig{ - Type: project.ResourceTypeDbRedis, - Name: "redis", + databaseName = "redis" + } else { + var err error + databaseName, err = i.getDatabaseNameByPrompt(ctx, database) + if err != nil { + return config, err + } + } + var authType = internal.AuthTypeUnspecified + if database == appdetect.DbPostgres || database == appdetect.DbMySql { + var err error + authType, err = chooseAuthTypeByPrompt( + database.Display(), + []internal.AuthType{internal.AuthTypeUserAssignedManagedIdentity, internal.AuthTypePassword}, + ctx, + i.console) + if err != nil { + return config, err } - config.Resources[redis.Name] = &redis - dbNames[database] = redis.Name - continue } - - var dbType project.ResourceType switch database { + case appdetect.DbRedis: + resourceConfig = project.ResourceConfig{ + Type: project.ResourceTypeDbRedis, + Name: "redis", + } case appdetect.DbMongo: - dbType = project.ResourceTypeDbMongo + resourceConfig = project.ResourceConfig{ + Type: project.ResourceTypeDbMongo, + Name: "mongo", + Props: project.MongoDBProps{ + DatabaseName: databaseName, + }, + } + case appdetect.DbCosmos: + cosmosDBProps := project.CosmosDBProps{ + DatabaseName: databaseName, + } + containers, err := detectCosmosSqlDatabaseContainersInDirectory(detect.root) + if err != nil { + return config, err + } 
+ for _, container := range containers { + cosmosDBProps.Containers = append(cosmosDBProps.Containers, project.CosmosDBContainerProps{ + ContainerName: container.ContainerName, + PartitionKeyPaths: container.PartitionKeyPaths, + }) + } + resourceConfig = project.ResourceConfig{ + Type: project.ResourceTypeDbCosmos, + Name: "cosmos", + Props: cosmosDBProps, + } case appdetect.DbPostgres: - dbType = project.ResourceTypeDbPostgres + resourceConfig = project.ResourceConfig{ + Type: project.ResourceTypeDbPostgres, + Name: "postgresql", + Props: project.PostgresProps{ + DatabaseName: databaseName, + AuthType: authType, + }, + } + case appdetect.DbMySql: + resourceConfig = project.ResourceConfig{ + Type: project.ResourceTypeDbMySQL, + Name: "mysql", + Props: project.MySQLProps{ + DatabaseName: databaseName, + AuthType: authType, + }, + } } + config.Resources[resourceConfig.Name] = &resourceConfig + dbNames[database] = resourceConfig.Name + } - db := project.ResourceConfig{ - Type: dbType, + for _, azureDepPair := range detect.AzureDeps { + azureDep := azureDepPair.first + authType, err := chooseAuthTypeByPrompt( + azureDep.ResourceDisplay(), + []internal.AuthType{internal.AuthTypeUserAssignedManagedIdentity, internal.AuthTypeConnectionString}, + ctx, + i.console) + if err != nil { + return config, err } - - for { - dbName, err := promptDbName(i.console, ctx, database) - if err != nil { - return config, err + switch azureDep.(type) { + case appdetect.AzureDepServiceBus: + azureDepServiceBus := azureDep.(appdetect.AzureDepServiceBus) + config.Resources["servicebus"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingServiceBus, + Props: project.ServiceBusProps{ + Queues: azureDepServiceBus.Queues, + IsJms: azureDepServiceBus.IsJms, + AuthType: authType, + }, } - - if dbName == "" { - i.console.Message(ctx, "Database name is required.") - continue + case appdetect.AzureDepEventHubs: + azureDepEventHubs := azureDep.(appdetect.AzureDepEventHubs) + if azureDepEventHubs.UseKafka { + config.Resources["kafka"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingKafka, + Props: project.KafkaProps{ + Topics: azureDepEventHubs.Names, + AuthType: authType, + SpringBootVersion: azureDepEventHubs.SpringBootVersion, + }, + } + } else { + config.Resources["eventhubs"] = &project.ResourceConfig{ + Type: project.ResourceTypeMessagingEventHubs, + Props: project.EventHubsProps{ + EventHubNames: azureDepEventHubs.Names, + AuthType: authType, + }, + } + } + case appdetect.AzureDepStorageAccount: + config.Resources["storage"] = &project.ResourceConfig{ + Type: project.ResourceTypeStorage, + Props: project.StorageProps{ + Containers: azureDep.(appdetect.AzureDepStorageAccount).ContainerNames, + AuthType: authType, + }, } - - db.Name = dbName - break } - - config.Resources[db.Name] = &db - dbNames[database] = db.Name } backends := []*project.ResourceConfig{} @@ -483,6 +708,21 @@ func (i *Initializer) prjConfigFromDetect( resSpec.Uses = append(resSpec.Uses, dbNames[db]) } + for _, azureDep := range svc.AzureDeps { + switch azureDep.(type) { + case appdetect.AzureDepServiceBus: + resSpec.Uses = append(resSpec.Uses, "servicebus") + case appdetect.AzureDepEventHubs: + if azureDep.(appdetect.AzureDepEventHubs).UseKafka { + resSpec.Uses = append(resSpec.Uses, "kafka") + } else { + resSpec.Uses = append(resSpec.Uses, "eventhubs") + } + case appdetect.AzureDepStorageAccount: + resSpec.Uses = append(resSpec.Uses, "storage") + } + } + resSpec.Name = name resSpec.Props = props config.Resources[name] = 
&resSpec @@ -505,6 +745,42 @@ func (i *Initializer) prjConfigFromDetect( return config, nil } +func (i *Initializer) getDatabaseNameByPrompt(ctx context.Context, database appdetect.DatabaseDep) (string, error) { + var result string + for { + dbName, err := promptDbName(i.console, ctx, database) + if err != nil { + return dbName, err + } + if dbName == "" { + i.console.Message(ctx, "Database name is required.") + continue + } + result = dbName + break + } + return result, nil +} + +func chooseAuthTypeByPrompt( + name string, + authOptions []internal.AuthType, + ctx context.Context, + console input.Console) (internal.AuthType, error) { + var options []string + for _, option := range authOptions { + options = append(options, internal.GetAuthTypeDescription(option)) + } + selection, err := console.Select(ctx, input.ConsoleOptions{ + Message: "Choose auth type for " + name + ":", + Options: options, + }) + if err != nil { + return internal.AuthTypeUnspecified, err + } + return authOptions[selection], nil +} + // ServiceFromDetect creates a ServiceConfig from an appdetect project. func ServiceFromDetect( root string, @@ -578,3 +854,57 @@ func ServiceFromDetect( return svc, nil } + +func processSpringCloudAzureDepByPrompt(console input.Console, ctx context.Context, project *appdetect.Project) error { + continueOption, err := console.Select(ctx, input.ConsoleOptions{ + Message: "Detected Kafka dependency but no spring-cloud-azure-starter found. Select an option", + Options: []string{ + "Exit then I will manually add this dependency", + "Continue without this dependency, and provision Azure Event Hubs for Kafka", + "Continue without this dependency, and not provision Azure Event Hubs for Kafka", + }, + }) + if err != nil { + return err + } + + switch continueOption { + case 0: + return errors.New("you have to manually add dependency com.azure.spring:spring-cloud-azure-starter by following https://github.com/Azure/azure-sdk-for-java/wiki/Spring-Versions-Mapping") + case 1: + return nil + case 2: + // remove Kafka Azure Dep + var result []appdetect.AzureDep + for _, dep := range project.AzureDeps { + if eventHubs, ok := dep.(appdetect.AzureDepEventHubs); !(ok && eventHubs.UseKafka) { + result = append(result, dep) + } + } + project.AzureDeps = result + return nil + } + return nil +} + +func promptSpringBootVersion(console input.Console, ctx context.Context) (string, error) { + selection, err := console.Select(ctx, input.ConsoleOptions{ + Message: "No spring boot version detected, what is your spring boot version?", + Options: []string{ + "Spring Boot 2.x", + "Spring Boot 3.x", + }, + }) + if err != nil { + return "", err + } + + switch selection { + case 0: + return "2.x", nil + case 1: + return "3.x", nil + default: + return appdetect.UnknownSpringBootVersion, nil + } +} diff --git a/cli/azd/internal/repository/app_init_test.go b/cli/azd/internal/repository/app_init_test.go index 37652ea84e7..12b6fbabd40 100644 --- a/cli/azd/internal/repository/app_init_test.go +++ b/cli/azd/internal/repository/app_init_test.go @@ -3,6 +3,7 @@ package repository import ( "context" "fmt" + "github.com/azure/azure-dev/cli/azd/internal/scaffold" "os" "path/filepath" "strings" @@ -216,6 +217,7 @@ func TestInitializer_prjConfigFromDetect(t *testing.T) { "my$special$db", "n", "postgres", // fill in db name + "Username and password", }, want: project.ProjectConfig{ Services: map[string]*project.ServiceConfig{ @@ -236,18 +238,25 @@ func TestInitializer_prjConfigFromDetect(t *testing.T) { Type: project.ResourceTypeDbRedis, 
Name: "redis", }, - "mongodb": { + "mongo": { Type: project.ResourceTypeDbMongo, - Name: "mongodb", + Name: "mongo", + Props: project.MongoDBProps{ + DatabaseName: "mongodb", + }, }, - "postgres": { + "postgresql": { Type: project.ResourceTypeDbPostgres, - Name: "postgres", + Name: "postgresql", + Props: project.PostgresProps{ + AuthType: internal.AuthTypePassword, + DatabaseName: "postgres", + }, }, "py": { Type: project.ResourceTypeHostContainerApp, Name: "py", - Uses: []string{"postgres", "mongodb", "redis"}, + Uses: []string{"postgresql", "mongo", "redis"}, Props: project.ContainerAppProps{ Port: 80, }, @@ -308,6 +317,7 @@ func TestInitializer_prjConfigFromDetect(t *testing.T) { context.Background(), dir, tt.detect, + &scaffold.InfraSpec{}, true) // Print extra newline to avoid mangling `go test -v` final test result output while waiting for final stdin, diff --git a/cli/azd/internal/repository/detect_confirm.go b/cli/azd/internal/repository/detect_confirm.go index e7191d271ae..6885372b1db 100644 --- a/cli/azd/internal/repository/detect_confirm.go +++ b/cli/azd/internal/repository/detect_confirm.go @@ -42,11 +42,17 @@ const ( EntryKindModified EntryKind = "modified" ) +type Pair struct { + first appdetect.AzureDep + second EntryKind +} + // detectConfirm handles prompting for confirming the detected services and databases type detectConfirm struct { // detected services and databases Services []appdetect.Project Databases map[appdetect.DatabaseDep]EntryKind + AzureDeps map[string]Pair // the root directory of the project root string @@ -59,6 +65,7 @@ type detectConfirm struct { // Init initializes state from initial detection output func (d *detectConfirm) Init(projects []appdetect.Project, root string) { d.Databases = make(map[appdetect.DatabaseDep]EntryKind) + d.AzureDeps = make(map[string]Pair) d.Services = make([]appdetect.Project, 0, len(projects)) d.modified = false d.root = root @@ -73,16 +80,24 @@ func (d *detectConfirm) Init(projects []appdetect.Project, root string) { d.Databases[dbType] = EntryKindDetected } } + + for _, azureDep := range project.AzureDeps { + if _, supported := azureDepMap[azureDep.ResourceDisplay()]; supported { + d.AzureDeps[azureDep.ResourceDisplay()] = Pair{azureDep, EntryKindDetected} + } + } } d.captureUsage( fields.AppInitDetectedDatabase, - fields.AppInitDetectedServices) + fields.AppInitDetectedServices, + fields.AppInitDetectedAzureDeps) } func (d *detectConfirm) captureUsage( databases attribute.Key, - services attribute.Key) { + services attribute.Key, + azureDeps attribute.Key) { names := make([]string, 0, len(d.Services)) for _, svc := range d.Services { names = append(names, string(svc.Language)) @@ -93,9 +108,16 @@ func (d *detectConfirm) captureUsage( dbNames = append(dbNames, string(db)) } + azureDepNames := make([]string, 0, len(d.AzureDeps)) + + for _, pair := range d.AzureDeps { + azureDepNames = append(azureDepNames, pair.first.ResourceDisplay()) + } + tracing.SetUsageAttributes( databases.StringSlice(dbNames), services.StringSlice(names), + azureDeps.StringSlice(azureDepNames), ) } @@ -146,7 +168,8 @@ func (d *detectConfirm) Confirm(ctx context.Context) error { case 0: d.captureUsage( fields.AppInitConfirmedDatabases, - fields.AppInitConfirmedServices) + fields.AppInitConfirmedServices, + fields.AppInitDetectedAzureDeps) return nil case 1: if err := d.remove(ctx); err != nil { @@ -203,14 +226,21 @@ func (d *detectConfirm) render(ctx context.Context) error { } } + if len(d.Databases) > 0 { + d.console.Message(ctx, 
"\n"+output.WithBold("Detected databases:")+"\n") + } for db, entry := range d.Databases { switch db { case appdetect.DbPostgres: recommendedServices = append(recommendedServices, "Azure Database for PostgreSQL flexible server") + case appdetect.DbMySql: + recommendedServices = append(recommendedServices, "Azure Database for MySQL flexible server") + case appdetect.DbCosmos: + recommendedServices = append(recommendedServices, "Azure Cosmos DB for NoSQL") case appdetect.DbMongo: recommendedServices = append(recommendedServices, "Azure CosmosDB API for MongoDB") case appdetect.DbRedis: - recommendedServices = append(recommendedServices, "Azure Container Apps Redis add-on") + recommendedServices = append(recommendedServices, "Azure Cache for Redis") } status := "" @@ -224,6 +254,23 @@ func (d *detectConfirm) render(ctx context.Context) error { d.console.Message(ctx, "") } + if len(d.AzureDeps) > 0 { + d.console.Message(ctx, "\n"+output.WithBold("Detected Azure dependencies:")+"\n") + } + for azureDep, entry := range d.AzureDeps { + recommendedServices = append(recommendedServices, azureDep) + + status := "" + if entry.second == EntryKindModified { + status = " " + output.WithSuccessFormat("[Updated]") + } else if entry.second == EntryKindManual { + status = " " + output.WithSuccessFormat("[Added]") + } + + d.console.Message(ctx, " "+color.BlueString(azureDep)+status) + d.console.Message(ctx, "") + } + displayedServices := make([]string, 0, len(recommendedServices)) for _, svc := range recommendedServices { displayedServices = append(displayedServices, color.MagentaString(svc)) diff --git a/cli/azd/internal/repository/infra_confirm.go b/cli/azd/internal/repository/infra_confirm.go index 6cea992dea6..66ba9afdc79 100644 --- a/cli/azd/internal/repository/infra_confirm.go +++ b/cli/azd/internal/repository/infra_confirm.go @@ -3,6 +3,8 @@ package repository import ( "context" "fmt" + "github.com/azure/azure-dev/cli/azd/internal" + "os" "path/filepath" "regexp" "strconv" @@ -49,15 +51,63 @@ func (i *Initializer) infraSpecFromDetect( i.console.Message(ctx, "Database name is required.") continue } - + authType, err := chooseAuthTypeByPrompt( + database.Display(), + []internal.AuthType{internal.AuthTypeUserAssignedManagedIdentity, internal.AuthTypePassword}, + ctx, + i.console) + if err != nil { + return scaffold.InfraSpec{}, err + } spec.DbPostgres = &scaffold.DatabasePostgres{ DatabaseName: dbName, + AuthType: authType, + } + break dbPrompt + case appdetect.DbMySql: + if dbName == "" { + i.console.Message(ctx, "Database name is required.") + continue + } + authType, err := chooseAuthTypeByPrompt( + database.Display(), + []internal.AuthType{internal.AuthTypeUserAssignedManagedIdentity, internal.AuthTypePassword}, + ctx, + i.console) + if err != nil { + return scaffold.InfraSpec{}, err + } + spec.DbMySql = &scaffold.DatabaseMySql{ + DatabaseName: dbName, + AuthType: authType, + } + break dbPrompt + case appdetect.DbCosmos: + if dbName == "" { + i.console.Message(ctx, "Database name is required.") + continue + } + containers, err := detectCosmosSqlDatabaseContainersInDirectory(detect.root) + if err != nil { + return scaffold.InfraSpec{}, err + } + spec.DbCosmos = &scaffold.DatabaseCosmosAccount{ + DatabaseName: dbName, + Containers: containers, } + break dbPrompt } break dbPrompt } } + for _, azureDep := range detect.AzureDeps { + err := i.buildInfraSpecByAzureDep(ctx, azureDep.first, &spec) + if err != nil { + return scaffold.InfraSpec{}, err + } + } + for _, svc := range detect.Services { name := 
names.LabelName(filepath.Base(svc.Path)) serviceSpec := scaffold.ServiceSpec{ @@ -85,17 +135,26 @@ func (i *Initializer) infraSpecFromDetect( switch db { case appdetect.DbMongo: - serviceSpec.DbCosmosMongo = &scaffold.DatabaseReference{ - DatabaseName: spec.DbCosmosMongo.DatabaseName, - } + serviceSpec.DbCosmosMongo = spec.DbCosmosMongo case appdetect.DbPostgres: - serviceSpec.DbPostgres = &scaffold.DatabaseReference{ - DatabaseName: spec.DbPostgres.DatabaseName, - } + serviceSpec.DbPostgres = spec.DbPostgres + case appdetect.DbMySql: + serviceSpec.DbMySql = spec.DbMySql + case appdetect.DbCosmos: + serviceSpec.DbCosmos = spec.DbCosmos case appdetect.DbRedis: - serviceSpec.DbRedis = &scaffold.DatabaseReference{ - DatabaseName: "redis", - } + serviceSpec.DbRedis = spec.DbRedis + } + } + + for _, azureDep := range svc.AzureDeps { + switch azureDep.(type) { + case appdetect.AzureDepServiceBus: + serviceSpec.AzureServiceBus = spec.AzureServiceBus + case appdetect.AzureDepEventHubs: + serviceSpec.AzureEventHubs = spec.AzureEventHubs + case appdetect.AzureDepStorageAccount: + serviceSpec.AzureStorageAccount = spec.AzureStorageAccount } } spec.Services = append(spec.Services, serviceSpec) @@ -160,7 +219,9 @@ func promptPortNumber(console input.Console, ctx context.Context, promptMessage func promptDbName(console input.Console, ctx context.Context, database appdetect.DatabaseDep) (string, error) { for { dbName, err := console.Prompt(ctx, input.ConsoleOptions{ - Message: fmt.Sprintf("Input the name of the app database (%s)", database.Display()), + Message: fmt.Sprintf("Input the database name for %s "+ + "(not the database server name; for example, in "+ + "'jdbc:mysql://databaseServerName:3306/databaseName' the database name is 'databaseName'):", database.Display()), Help: "Hint: App database name\n\n" + "Name of the database that the app connects to. " + "This database will be created after running azd provision or azd up."
+ @@ -258,3 +319,84 @@ func PromptPort( return port, nil } + +func (i *Initializer) buildInfraSpecByAzureDep( + ctx context.Context, + azureDep appdetect.AzureDep, + spec *scaffold.InfraSpec) error { + authType, err := chooseAuthTypeByPrompt( + azureDep.ResourceDisplay(), + []internal.AuthType{internal.AuthTypeUserAssignedManagedIdentity, internal.AuthTypeConnectionString}, + ctx, + i.console) + if err != nil { + return err + } + switch dependency := azureDep.(type) { + case appdetect.AzureDepServiceBus: + spec.AzureServiceBus = &scaffold.AzureDepServiceBus{ + IsJms: dependency.IsJms, + Queues: dependency.Queues, + AuthType: authType, + } + case appdetect.AzureDepEventHubs: + spec.AzureEventHubs = &scaffold.AzureDepEventHubs{ + EventHubNames: dependency.Names, + AuthType: authType, + UseKafka: dependency.UseKafka, + SpringBootVersion: dependency.SpringBootVersion, + } + case appdetect.AzureDepStorageAccount: + spec.AzureStorageAccount = &scaffold.AzureDepStorageAccount{ + ContainerNames: dependency.ContainerNames, + AuthType: authType, + } + } + return nil +} + +func detectCosmosSqlDatabaseContainersInDirectory(root string) ([]scaffold.CosmosSqlDatabaseContainer, error) { + var result []scaffold.CosmosSqlDatabaseContainer + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && filepath.Ext(path) == ".java" { + container, err := detectCosmosSqlDatabaseContainerInFile(path) + if err != nil { + return err + } + if len(container.ContainerName) != 0 { + result = append(result, container) + } + } + return nil + }) + return result, err +} + +func detectCosmosSqlDatabaseContainerInFile(filePath string) (scaffold.CosmosSqlDatabaseContainer, error) { + var result scaffold.CosmosSqlDatabaseContainer + result.PartitionKeyPaths = make([]string, 0) + content, err := os.ReadFile(filePath) + if err != nil { + return result, err + } + // todo: + // 1. Maybe "@Container" is not "com.azure.spring.data.cosmos.core.mapping.Container" + // 2. 
Maybe "@Container" is imported by "com.azure.spring.data.cosmos.core.mapping.*" + containerRegex := regexp.MustCompile(`@Container\s*\(containerName\s*=\s*"([^"]+)"\)`) + partitionKeyRegex := regexp.MustCompile(`@PartitionKey\s*(?:\n\s*)?(?:private|public|protected)?\s*\w+\s+(\w+);`) + + matches := containerRegex.FindAllStringSubmatch(string(content), -1) + if len(matches) != 1 { + return result, nil + } + result.ContainerName = matches[0][1] + + matches = partitionKeyRegex.FindAllStringSubmatch(string(content), -1) + for _, match := range matches { + result.PartitionKeyPaths = append(result.PartitionKeyPaths, match[1]) + } + return result, nil +} diff --git a/cli/azd/internal/repository/infra_confirm_test.go b/cli/azd/internal/repository/infra_confirm_test.go index 98fcfe90ab7..98839ce520a 100644 --- a/cli/azd/internal/repository/infra_confirm_test.go +++ b/cli/azd/internal/repository/infra_confirm_test.go @@ -3,7 +3,10 @@ package repository import ( "context" "fmt" + "github.com/azure/azure-dev/cli/azd/pkg/osutil" + "github.com/stretchr/testify/assert" "os" + "path/filepath" "strings" "testing" @@ -166,11 +169,13 @@ func TestInitializer_infraSpecFromDetect(t *testing.T) { "n", "my$special$db", "n", - "myappdb", // fill in db name + "myappdb", // fill in db name + "User assigned managed identity", // confirm db authentication }, want: scaffold.InfraSpec{ DbPostgres: &scaffold.DatabasePostgres{ DatabaseName: "myappdb", + AuthType: "USER_ASSIGNED_MANAGED_IDENTITY", }, Services: []scaffold.ServiceSpec{ { @@ -183,8 +188,9 @@ func TestInitializer_infraSpecFromDetect(t *testing.T) { }, }, }, - DbPostgres: &scaffold.DatabaseReference{ + DbPostgres: &scaffold.DatabasePostgres{ DatabaseName: "myappdb", + AuthType: "USER_ASSIGNED_MANAGED_IDENTITY", }, }, { @@ -229,3 +235,72 @@ func TestInitializer_infraSpecFromDetect(t *testing.T) { }) } } + +func TestDetectCosmosSqlDatabaseContainerInFile(t *testing.T) { + tests := []struct { + javaFileContent string + expectedContainers scaffold.CosmosSqlDatabaseContainer + }{ + { + javaFileContent: "", + expectedContainers: scaffold.CosmosSqlDatabaseContainer{ + ContainerName: "", + PartitionKeyPaths: []string{}, + }, + }, + { + javaFileContent: "@Container(containerName = \"users\")", + expectedContainers: scaffold.CosmosSqlDatabaseContainer{ + ContainerName: "users", + PartitionKeyPaths: []string{}, + }, + }, + { + javaFileContent: "" + + "@Container(containerName = \"users\")\n" + + "public class User {\n" + + " @Id\n " + + "private String id;\n" + + " private String firstName;\n" + + " @PartitionKey\n" + + " private String lastName;", + expectedContainers: scaffold.CosmosSqlDatabaseContainer{ + ContainerName: "users", + PartitionKeyPaths: []string{ + "lastName", + }, + }, + }, + { + javaFileContent: "" + + "@Container(containerName = \"users\")\n" + + "public class User {\n" + + " @Id\n " + + "private String id;\n" + + " private String firstName;\n" + + " @PartitionKey private String lastName;", + expectedContainers: scaffold.CosmosSqlDatabaseContainer{ + ContainerName: "users", + PartitionKeyPaths: []string{ + "lastName", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.javaFileContent, func(t *testing.T) { + tempDir := t.TempDir() + tempFile := filepath.Join(tempDir, "Example.java") + file, err := os.Create(tempFile) + assert.NoError(t, err) + file.Close() + + err = os.WriteFile(tempFile, []byte(tt.javaFileContent), osutil.PermissionFile) + assert.NoError(t, err) + + container, err := detectCosmosSqlDatabaseContainerInFile(tempFile) + 
assert.NoError(t, err) + assert.Equal(t, tt.expectedContainers, container) + }) + } +} diff --git a/cli/azd/internal/repository/testdata/empty/azureyaml_created.txt b/cli/azd/internal/repository/testdata/empty/azureyaml_created.txt index 7318d2a5007..5443f055e86 100644 --- a/cli/azd/internal/repository/testdata/empty/azureyaml_created.txt +++ b/cli/azd/internal/repository/testdata/empty/azureyaml_created.txt @@ -1,3 +1,3 @@ -# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json +# yaml-language-server: $schema=https://raw.githubusercontent.com/azure-javaee/azure-dev/feature/sjad/schemas/alpha/azure.yaml.json name: "" diff --git a/cli/azd/internal/repository/util.go b/cli/azd/internal/repository/util.go new file mode 100644 index 00000000000..3e5f563646a --- /dev/null +++ b/cli/azd/internal/repository/util.go @@ -0,0 +1,106 @@ +package repository + +import "strings" + +//cspell:disable + +// LabelName cleans up a string to be used as a RFC 1123 Label name. +// It does not enforce the 63 character limit. +// +// RFC 1123 Label name: +// - contain only lowercase alphanumeric characters or '-' +// - start with an alphanumeric character +// - end with an alphanumeric character +// +// Examples: +// - myproject, MYPROJECT -> myproject +// - myProject, myProjecT, MyProject, MyProjecT -> my-project +// - my.project, My.Project, my-project, My-Project -> my-project +func LabelName(name string) string { + hasSeparator, n := cleanAlphaNumeric(name) + if hasSeparator { + return labelNameFromSeparators(n) + } + + return labelNameFromCasing(name) +} + +//cspell:enable + +// cleanAlphaNumeric removes non-alphanumeric characters from the name. +// +// It also returns whether the name uses word separators. +func cleanAlphaNumeric(name string) (hasSeparator bool, cleaned string) { + sb := strings.Builder{} + hasSeparator = false + for _, c := range name { + if isAsciiAlphaNumeric(c) { + sb.WriteRune(c) + } else if isSeparator(c) { + hasSeparator = true + sb.WriteRune(c) + } + } + + return hasSeparator, sb.String() +} + +func isAsciiAlphaNumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('A' <= r && r <= 'Z') || ('a' <= r && r <= 'z') +} + +func isSeparator(r rune) bool { + return r == '-' || r == '_' || r == '.' +} + +func lowerCase(r rune) rune { + if 'A' <= r && r <= 'Z' { + r += 'a' - 'A' + } + return r +} + +// Converts camel-cased or Pascal-cased names into lower-cased dash-separated names. +// Example: MyProject, myProject -> my-project +func labelNameFromCasing(name string) string { + result := strings.Builder{} + // previously seen upper-case character + prevUpperCase := -2 // -2 to avoid matching the first character + + for i, c := range name { + if 'A' <= c && c <= 'Z' { + if prevUpperCase == i-1 { // handle runs of upper-case word + prevUpperCase = i + result.WriteRune(lowerCase(c)) + continue + } + + if i > 0 && i != len(name)-1 { + result.WriteRune('-') + } + + prevUpperCase = i + } + + if isAsciiAlphaNumeric(c) { + result.WriteRune(lowerCase(c)) + } + } + + return result.String() +} + +// Converts all word-separated names into lower-cased dash-separated names. 
+// Examples: my.project, my_project, My-Project -> my-project +func labelNameFromSeparators(name string) string { + result := strings.Builder{} + for i, c := range name { + if isAsciiAlphaNumeric(c) { + result.WriteRune(lowerCase(c)) + } else if i > 0 && i != len(name)-1 && isSeparator(c) { + result.WriteRune('-') + } + } + + return result.String() +} diff --git a/cli/azd/internal/repository/util_test.go b/cli/azd/internal/repository/util_test.go new file mode 100644 index 00000000000..56a2c467756 --- /dev/null +++ b/cli/azd/internal/repository/util_test.go @@ -0,0 +1,67 @@ +package repository + +import ( + "testing" +) + +//cspell:disable + +func TestLabelName(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"Lowercase", "myproject", "myproject"}, + {"Uppercase", "MYPROJECT", "myproject"}, + {"MixedCase", "myProject", "my-project"}, + {"MixedCaseEnd", "myProjecT", "my-project"}, + {"TitleCase", "MyProject", "my-project"}, + {"TitleCaseEnd", "MyProjecT", "my-project"}, + {"WithDot", "my.project", "my-project"}, + {"WithDotTitleCase", "My.Project", "my-project"}, + {"WithHyphen", "my-project", "my-project"}, + {"WithHyphenTitleCase", "My-Project", "my-project"}, + {"StartWithNumber", "1myproject", "1myproject"}, + {"EndWithNumber", "myproject2", "myproject2"}, + {"MixedWithNumbers", "my2Project3", "my2-project3"}, + {"SpecialCharacters", "my_project!@#", "my-project"}, + {"EmptyString", "", ""}, + {"OnlySpecialCharacters", "@#$%^&*", ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := LabelName(tt.input) + if result != tt.expected { + t.Errorf("LabelName(%q) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} + +func TestLabelNameEdgeCases(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"SingleCharacter", "A", "a"}, + {"TwoCharacters", "Ab", "ab"}, + {"StartEndHyphens", "-abc-", "abc"}, + {"LongString", + "ThisIsOneVeryLongStringThatExceedsTheSixtyThreeCharacterLimitForRFC1123LabelNames", + "this-is-one-very-long-string-that-exceeds-the-sixty-three-character-limit-for-rfc1123-label-names"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := LabelName(tt.input) + if result != tt.expected { + t.Errorf("LabelName(%q) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} + +//cspell:enable diff --git a/cli/azd/internal/scaffold/bicep_env.go b/cli/azd/internal/scaffold/bicep_env.go new file mode 100644 index 00000000000..330f83cbb60 --- /dev/null +++ b/cli/azd/internal/scaffold/bicep_env.go @@ -0,0 +1,214 @@ +package scaffold + +import ( + "fmt" + "github.com/azure/azure-dev/cli/azd/internal" + "strings" +) + +func ToBicepEnv(env Env) BicepEnv { + if isResourceConnectionEnv(env.Value) { + resourceType, resourceInfoType := toResourceConnectionInfo(env.Value) + value, ok := bicepEnv[resourceType][resourceInfoType] + if !ok { + panic(unsupportedType(env)) + } + if isSecret(resourceInfoType) { + if isKeyVaultSecret(value) { + return BicepEnv{ + BicepEnvType: BicepEnvTypeKeyVaultSecret, + Name: env.Name, + SecretName: secretName(env), + SecretValue: unwrapKeyVaultSecretValue(value), + } + } else { + return BicepEnv{ + BicepEnvType: BicepEnvTypeSecret, + Name: env.Name, + SecretName: secretName(env), + SecretValue: value, + } + } + } else { + return BicepEnv{ + BicepEnvType: BicepEnvTypePlainText, + Name: env.Name, + PlainTextValue: value, + } + } + } else { + return BicepEnv{ + BicepEnvType: BicepEnvTypePlainText, + Name: 
env.Name, + PlainTextValue: toBicepEnvPlainTextValue(env.Value), + } + } +} + +func ShouldAddToBicepFile(spec ServiceSpec, name string) bool { + return !willBeAddedByServiceConnector(spec, name) +} + +func willBeAddedByServiceConnector(spec ServiceSpec, name string) bool { + if (spec.DbPostgres != nil && spec.DbPostgres.AuthType == internal.AuthTypeUserAssignedManagedIdentity) || + (spec.DbMySql != nil && spec.DbMySql.AuthType == internal.AuthTypeUserAssignedManagedIdentity) { + return name == "spring.datasource.url" || + name == "spring.datasource.username" || + name == "spring.datasource.azure.passwordless-enabled" + } else { + return false + } +} + +// inputStringExample -> 'inputStringExample' +func addQuotation(input string) string { + return fmt.Sprintf("'%s'", input) +} + +// 'inputStringExample' -> 'inputStringExample' +// '${inputSingleVariableExample}' -> inputSingleVariableExample +// '${HOST}:${PORT}' -> '${HOST}:${PORT}' +func removeQuotationIfItIsASingleVariable(input string) string { + prefix := "'${" + suffix := "}'" + if strings.HasPrefix(input, prefix) && strings.HasSuffix(input, suffix) { + prefixTrimmed := strings.TrimPrefix(input, prefix) + trimmed := strings.TrimSuffix(prefixTrimmed, suffix) + if strings.IndexAny(trimmed, "}") == -1 { + return trimmed + } else { + return input + } + } else { + return input + } +} + +// The BicepEnv.PlainTextValue is handled as variable by default. +// If the value is string, it should contain ('). +// Here are some examples of input and output: +// inputStringExample -> 'inputStringExample' +// ${inputSingleVariableExample} -> inputSingleVariableExample +// ${HOST}:${PORT} -> '${HOST}:${PORT}' +func toBicepEnvPlainTextValue(input string) string { + return removeQuotationIfItIsASingleVariable(addQuotation(input)) +} + +// BicepEnv +// +// For Name and SecretName, they are handled as string by default. +// Which means quotation will be added before they are used in bicep file, because they are always string value. +// +// For PlainTextValue and SecretValue, they are handled as variable by default. +// When they are string value, quotation should be contained by themselves. +// Set variable as default is mainly to avoid this problem: +// https://learn.microsoft.com/en-us/azure/azure-resource-manager/bicep/linter-rule-simplify-interpolation +type BicepEnv struct { + BicepEnvType BicepEnvType + Name string + PlainTextValue string + SecretName string + SecretValue string +} + +type BicepEnvType string + +const ( + BicepEnvTypePlainText BicepEnvType = "plainText" + BicepEnvTypeSecret BicepEnvType = "secret" + BicepEnvTypeKeyVaultSecret BicepEnvType = "keyVaultSecret" +) + +// Note: The value is handled as variable. +// If the value is string, it should contain quotation inside itself. 
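Note on the quoting rule above: a minimal standalone sketch of the same behavior (standard library only; quoteForBicep is an illustrative name, not part of this change) shows how plain strings gain single quotes, a value that is exactly one ${...} reference collapses to a bare Bicep symbol, and multi-part interpolations stay quoted.

package main

import (
	"fmt"
	"strings"
)

// quoteForBicep mirrors the rule documented above: wrap the value in single
// quotes, then unwrap again when the whole value is exactly one ${...}
// reference, so bicep sees a bare symbol instead of '${symbol}'.
func quoteForBicep(value string) string {
	quoted := fmt.Sprintf("'%s'", value)
	if strings.HasPrefix(quoted, "'${") && strings.HasSuffix(quoted, "}'") {
		inner := strings.TrimSuffix(strings.TrimPrefix(quoted, "'${"), "}'")
		if !strings.Contains(inner, "}") {
			return inner
		}
	}
	return quoted
}

func main() {
	fmt.Println(quoteForBicep("premium"))          // 'premium'
	fmt.Println(quoteForBicep("${POSTGRES_HOST}")) // POSTGRES_HOST
	fmt.Println(quoteForBicep("${HOST}:${PORT}"))  // '${HOST}:${PORT}'
}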
+var bicepEnv = map[ResourceType]map[ResourceInfoType]string{ + ResourceTypeDbPostgres: { + ResourceInfoTypeHost: "postgreServer.outputs.fqdn", + ResourceInfoTypePort: "'5432'", + ResourceInfoTypeDatabaseName: "postgreSqlDatabaseName", + ResourceInfoTypeUsername: "postgreSqlDatabaseUser", + ResourceInfoTypePassword: "postgreSqlDatabasePassword", + ResourceInfoTypeUrl: "'postgresql://${postgreSqlDatabaseUser}:${postgreSqlDatabasePassword}@${postgreServer.outputs.fqdn}:5432/${postgreSqlDatabaseName}'", + ResourceInfoTypeJdbcUrl: "'jdbc:postgresql://${postgreServer.outputs.fqdn}:5432/${postgreSqlDatabaseName}'", + }, + ResourceTypeDbMySQL: { + ResourceInfoTypeHost: "mysqlServer.outputs.fqdn", + ResourceInfoTypePort: "'3306'", + ResourceInfoTypeDatabaseName: "mysqlDatabaseName", + ResourceInfoTypeUsername: "mysqlDatabaseUser", + ResourceInfoTypePassword: "mysqlDatabasePassword", + ResourceInfoTypeUrl: "'mysql://${mysqlDatabaseUser}:${mysqlDatabasePassword}@${mysqlServer.outputs.fqdn}:3306/${mysqlDatabaseName}'", + ResourceInfoTypeJdbcUrl: "'jdbc:mysql://${mysqlServer.outputs.fqdn}:3306/${mysqlDatabaseName}'", + }, + ResourceTypeDbRedis: { + ResourceInfoTypeHost: "redis.outputs.hostName", + ResourceInfoTypePort: "string(redis.outputs.sslPort)", + ResourceInfoTypeEndpoint: "'${redis.outputs.hostName}:${redis.outputs.sslPort}'", + ResourceInfoTypePassword: wrapToKeyVaultSecretValue("redisConn.outputs.keyVaultUrlForPass"), + ResourceInfoTypeUrl: wrapToKeyVaultSecretValue("redisConn.outputs.keyVaultUrlForUrl"), + }, + ResourceTypeDbMongo: { + ResourceInfoTypeDatabaseName: "mongoDatabaseName", + ResourceInfoTypeUrl: wrapToKeyVaultSecretValue("cosmos.outputs.exportedSecrets['MONGODB-URL'].secretUri"), + }, + ResourceTypeDbCosmos: { + ResourceInfoTypeEndpoint: "cosmos.outputs.endpoint", + ResourceInfoTypeDatabaseName: "cosmosDatabaseName", + }, + ResourceTypeMessagingServiceBus: { + ResourceInfoTypeNamespace: "serviceBusNamespace.outputs.name", + ResourceInfoTypeConnectionString: wrapToKeyVaultSecretValue("serviceBusConnectionString.outputs.keyVaultUrl"), + }, + ResourceTypeMessagingEventHubs: { + ResourceInfoTypeNamespace: "eventHubNamespace.outputs.name", + ResourceInfoTypeConnectionString: wrapToKeyVaultSecretValue("eventHubsConnectionString.outputs.keyVaultUrl"), + }, + ResourceTypeMessagingKafka: { + ResourceInfoTypeEndpoint: "'${eventHubNamespace.outputs.name}.servicebus.windows.net:9093'", + ResourceInfoTypeConnectionString: wrapToKeyVaultSecretValue("eventHubsConnectionString.outputs.keyVaultUrl"), + }, + ResourceTypeStorage: { + ResourceInfoTypeAccountName: "storageAccountName", + ResourceInfoTypeConnectionString: wrapToKeyVaultSecretValue("storageAccountConnectionString.outputs.keyVaultUrl"), + }, + ResourceTypeOpenAiModel: { + ResourceInfoTypeEndpoint: "account.outputs.endpoint", + }, + ResourceTypeHostContainerApp: {}, +} + +func unsupportedType(env Env) string { + return fmt.Sprintf("unsupported connection info type for resource type. 
"+ + "value = %s", env.Value) +} + +func PlaceHolderForServiceIdentityClientId() string { + return "__PlaceHolderForServiceIdentityClientId" +} + +func isSecret(info ResourceInfoType) bool { + return info == ResourceInfoTypePassword || info == ResourceInfoTypeUrl || info == ResourceInfoTypeConnectionString +} + +func secretName(env Env) string { + resourceType, resourceInfoType := toResourceConnectionInfo(env.Value) + name := fmt.Sprintf("%s-%s", resourceType, resourceInfoType) + lowerCaseName := strings.ToLower(name) + noDotName := strings.Replace(lowerCaseName, ".", "-", -1) + noUnderscoreName := strings.Replace(noDotName, "_", "-", -1) + return noUnderscoreName +} + +var keyVaultSecretPrefix = "keyvault:" + +func isKeyVaultSecret(value string) bool { + return strings.HasPrefix(value, keyVaultSecretPrefix) +} + +func wrapToKeyVaultSecretValue(value string) string { + return fmt.Sprintf("%s%s", keyVaultSecretPrefix, value) +} + +func unwrapKeyVaultSecretValue(value string) string { + return strings.TrimPrefix(value, keyVaultSecretPrefix) +} diff --git a/cli/azd/internal/scaffold/bicep_env_test.go b/cli/azd/internal/scaffold/bicep_env_test.go new file mode 100644 index 00000000000..d93efd57e98 --- /dev/null +++ b/cli/azd/internal/scaffold/bicep_env_test.go @@ -0,0 +1,172 @@ +package scaffold + +import ( + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestToBicepEnv(t *testing.T) { + tests := []struct { + name string + in Env + want BicepEnv + }{ + { + name: "Plain text", + in: Env{ + Name: "enable-customer-related-feature", + Value: "true", + }, + want: BicepEnv{ + BicepEnvType: BicepEnvTypePlainText, + Name: "enable-customer-related-feature", + PlainTextValue: "'true'", // Note: Quotation add automatically + }, + }, + { + name: "Plain text from EnvTypeResourceConnectionPlainText", + in: Env{ + Name: "spring.jms.servicebus.pricing-tier", + Value: "premium", + }, + want: BicepEnv{ + BicepEnvType: BicepEnvTypePlainText, + Name: "spring.jms.servicebus.pricing-tier", + PlainTextValue: "'premium'", // Note: Quotation add automatically + }, + }, + { + name: "Plain text from EnvTypeResourceConnectionResourceInfo", + in: Env{ + Name: "POSTGRES_PORT", + Value: ToResourceConnectionEnv(ResourceTypeDbPostgres, ResourceInfoTypePort), + }, + want: BicepEnv{ + BicepEnvType: BicepEnvTypePlainText, + Name: "POSTGRES_PORT", + PlainTextValue: "'5432'", + }, + }, + { + name: "Secret", + in: Env{ + Name: "POSTGRES_PASSWORD", + Value: ToResourceConnectionEnv(ResourceTypeDbPostgres, ResourceInfoTypePassword), + }, + want: BicepEnv{ + BicepEnvType: BicepEnvTypeSecret, + Name: "POSTGRES_PASSWORD", + SecretName: "db-postgres-password", + SecretValue: "postgreSqlDatabasePassword", + }, + }, + { + name: "KeuVault Secret", + in: Env{ + Name: "REDIS_PASSWORD", + Value: ToResourceConnectionEnv(ResourceTypeDbRedis, ResourceInfoTypePassword), + }, + want: BicepEnv{ + BicepEnvType: BicepEnvTypeKeyVaultSecret, + Name: "REDIS_PASSWORD", + SecretName: "db-redis-password", + SecretValue: "redisConn.outputs.keyVaultUrlForPass", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ToBicepEnv(tt.in) + assert.Equal(t, tt.want, actual) + }) + } +} + +func TestToBicepEnvPlainTextValue(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + { + name: "string", + in: "inputStringExample", + want: "'inputStringExample'", + }, + { + name: "single variable", + in: "${inputSingleVariableExample}", + want: 
"inputSingleVariableExample", + }, + { + name: "multiple variable", + in: "${HOST}:${PORT}", + want: "'${HOST}:${PORT}'", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := toBicepEnvPlainTextValue(tt.in) + assert.Equal(t, tt.want, actual) + }) + } +} + +func TestShouldAddToBicepFile(t *testing.T) { + tests := []struct { + name string + infraSpec ServiceSpec + propertyName string + want bool + }{ + { + name: "not related property and not using mysql and postgres", + infraSpec: ServiceSpec{}, + propertyName: "test", + want: true, + }, + { + name: "not using mysql and postgres", + infraSpec: ServiceSpec{}, + propertyName: "spring.datasource.url", + want: true, + }, + { + name: "not using user assigned managed identity", + infraSpec: ServiceSpec{ + DbMySql: &DatabaseMySql{ + AuthType: internal.AuthTypePassword, + }, + }, + propertyName: "spring.datasource.url", + want: true, + }, + { + name: "not service connector added property", + infraSpec: ServiceSpec{ + DbMySql: &DatabaseMySql{ + AuthType: internal.AuthTypePassword, + }, + }, + propertyName: "test", + want: true, + }, + { + name: "should not added", + infraSpec: ServiceSpec{ + DbMySql: &DatabaseMySql{ + AuthType: internal.AuthTypePassword, + }, + }, + propertyName: "spring.datasource.url", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ShouldAddToBicepFile(tt.infraSpec, tt.propertyName) + assert.Equal(t, tt.want, actual) + }) + } +} diff --git a/cli/azd/internal/scaffold/scaffold.go b/cli/azd/internal/scaffold/scaffold.go index f9ce4752ea9..2b9d94a6abb 100644 --- a/cli/azd/internal/scaffold/scaffold.go +++ b/cli/azd/internal/scaffold/scaffold.go @@ -3,6 +3,7 @@ package scaffold import ( "bytes" "fmt" + "github.com/azure/azure-dev/cli/azd/internal" "io/fs" "os" "path" @@ -24,12 +25,15 @@ const templateRoot = "scaffold/templates" // To execute a named template, call Execute with the defined name. func Load() (*template.Template, error) { funcMap := template.FuncMap{ - "bicepName": BicepName, - "containerAppName": ContainerAppName, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "alphaSnakeUpper": AlphaSnakeUpper, - "formatParam": FormatParameter, + "bicepName": BicepName, + "containerAppName": ContainerAppName, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "alphaSnakeUpper": AlphaSnakeUpper, + "formatParam": FormatParameter, + "hasPrefix": strings.HasPrefix, + "toBicepEnv": ToBicepEnv, + "shouldAddToBicepFile": ShouldAddToBicepFile, } t, err := template.New("templates"). 
@@ -76,6 +80,18 @@ func supportingFiles(spec InfraSpec) []string { files = append(files, "/modules/fetch-container-image.bicep") } + if spec.AzureServiceBus != nil && spec.AzureServiceBus.AuthType == internal.AuthTypeConnectionString { + files = append(files, "/modules/set-servicebus-namespace-connection-string.bicep") + } + + if spec.AzureEventHubs != nil && spec.AzureEventHubs.AuthType == internal.AuthTypeConnectionString { + files = append(files, "/modules/set-event-hubs-namespace-connection-string.bicep") + } + + if spec.AzureStorageAccount != nil && spec.AzureStorageAccount.AuthType == internal.AuthTypeConnectionString { + files = append(files, "/modules/set-storage-account-connection-string.bicep") + } + return files } @@ -201,12 +217,21 @@ func executeToFS(targetFS *memfs.FS, tmpl *template.Template, name string, path } func preExecExpand(spec *InfraSpec) { - // postgres requires specific password seeding parameters + // postgres and mysql requires specific password seeding parameters if spec.DbPostgres != nil { spec.Parameters = append(spec.Parameters, Parameter{ - Name: "databasePassword", - Value: "$(secretOrRandomPassword ${AZURE_KEY_VAULT_NAME} databasePassword)", + Name: "postgreSqlDatabasePassword", + Value: "$(secretOrRandomPassword ${AZURE_KEY_VAULT_NAME} postgreSqlDatabasePassword)", + Type: "string", + Secret: true, + }) + } + if spec.DbMySql != nil { + spec.Parameters = append(spec.Parameters, + Parameter{ + Name: "mysqlDatabasePassword", + Value: "$(secretOrRandomPassword ${AZURE_KEY_VAULT_NAME} mysqlDatabasePassword)", Type: "string", Secret: true, }) diff --git a/cli/azd/internal/scaffold/scaffold_test.go b/cli/azd/internal/scaffold/scaffold_test.go index 238043c3673..d5a7dc212fb 100644 --- a/cli/azd/internal/scaffold/scaffold_test.go +++ b/cli/azd/internal/scaffold/scaffold_test.go @@ -98,13 +98,11 @@ func TestExecInfra(t *testing.T) { }, }, }, - DbCosmosMongo: &DatabaseReference{ + DbCosmosMongo: &DatabaseCosmosMongo{ DatabaseName: "appdb", }, - DbRedis: &DatabaseReference{ - DatabaseName: "redis", - }, - DbPostgres: &DatabaseReference{ + DbRedis: &DatabaseRedis{}, + DbPostgres: &DatabasePostgres{ DatabaseName: "appdb", }, }, @@ -133,7 +131,7 @@ func TestExecInfra(t *testing.T) { { Name: "api", Port: 3100, - DbPostgres: &DatabaseReference{ + DbPostgres: &DatabasePostgres{ DatabaseName: "appdb", }, }, @@ -150,7 +148,7 @@ func TestExecInfra(t *testing.T) { { Name: "api", Port: 3100, - DbCosmosMongo: &DatabaseReference{ + DbCosmosMongo: &DatabaseCosmosMongo{ DatabaseName: "appdb", }, }, @@ -163,11 +161,9 @@ func TestExecInfra(t *testing.T) { DbRedis: &DatabaseRedis{}, Services: []ServiceSpec{ { - Name: "api", - Port: 3100, - DbRedis: &DatabaseReference{ - DatabaseName: "redis", - }, + Name: "api", + Port: 3100, + DbRedis: &DatabaseRedis{}, }, }, }, diff --git a/cli/azd/internal/scaffold/spec.go b/cli/azd/internal/scaffold/spec.go index 763b83c322e..198556c5798 100644 --- a/cli/azd/internal/scaffold/spec.go +++ b/cli/azd/internal/scaffold/spec.go @@ -2,6 +2,7 @@ package scaffold import ( "fmt" + "github.com/azure/azure-dev/cli/azd/internal" "strings" ) @@ -11,11 +12,17 @@ type InfraSpec struct { // Databases to create DbPostgres *DatabasePostgres - DbCosmosMongo *DatabaseCosmosMongo + DbMySql *DatabaseMySql DbRedis *DatabaseRedis + DbCosmosMongo *DatabaseCosmosMongo + DbCosmos *DatabaseCosmosAccount // ai models AIModels []AIModel + + AzureServiceBus *AzureDepServiceBus + AzureEventHubs *AzureDepEventHubs + AzureStorageAccount *AzureDepStorageAccount } type Parameter 
struct { @@ -28,6 +35,23 @@ type Parameter struct { type DatabasePostgres struct { DatabaseUser string DatabaseName string + AuthType internal.AuthType +} + +type DatabaseMySql struct { + DatabaseUser string + DatabaseName string + AuthType internal.AuthType +} + +type CosmosSqlDatabaseContainer struct { + ContainerName string + PartitionKeyPaths []string +} + +type DatabaseCosmosAccount struct { + DatabaseName string + Containers []CosmosSqlDatabaseContainer } type DatabaseCosmosMongo struct { @@ -51,11 +75,30 @@ type AIModelModel struct { Version string } +type AzureDepServiceBus struct { + Queues []string + TopicsAndSubscriptions map[string][]string + AuthType internal.AuthType + IsJms bool +} + +type AzureDepEventHubs struct { + EventHubNames []string + AuthType internal.AuthType + UseKafka bool + SpringBootVersion string +} + +type AzureDepStorageAccount struct { + ContainerNames []string + AuthType internal.AuthType +} + type ServiceSpec struct { Name string Port int - Env map[string]string + Envs []Env // Front-end properties. Frontend *Frontend @@ -64,14 +107,88 @@ type ServiceSpec struct { Backend *Backend // Connection to a database - DbPostgres *DatabaseReference - DbCosmosMongo *DatabaseReference - DbRedis *DatabaseReference + DbPostgres *DatabasePostgres + DbMySql *DatabaseMySql + DbRedis *DatabaseRedis + DbCosmosMongo *DatabaseCosmosMongo + DbCosmos *DatabaseCosmosAccount // AI model connections AIModels []AIModelReference + + AzureServiceBus *AzureDepServiceBus + AzureEventHubs *AzureDepEventHubs + AzureStorageAccount *AzureDepStorageAccount +} + +type Env struct { + Name string + Value string +} + +var resourceConnectionEnvPrefix = "$resource.connection" + +func isResourceConnectionEnv(env string) bool { + if !strings.HasPrefix(env, resourceConnectionEnvPrefix) { + return false + } + a := strings.Split(env, ":") + if len(a) != 3 { + return false + } + return a[0] != "" && a[1] != "" && a[2] != "" } +func ToResourceConnectionEnv(resourceType ResourceType, resourceInfoType ResourceInfoType) string { + return fmt.Sprintf("%s:%s:%s", resourceConnectionEnvPrefix, resourceType, resourceInfoType) +} + +func toResourceConnectionInfo(resourceConnectionEnv string) (resourceType ResourceType, + resourceInfoType ResourceInfoType) { + if !isResourceConnectionEnv(resourceConnectionEnv) { + return "", "" + } + a := strings.Split(resourceConnectionEnv, ":") + return ResourceType(a[1]), ResourceInfoType(a[2]) +} + +// todo merge ResourceType and project.ResourceType +// Not use project.ResourceType because it will cause cycle import. +// Not merge it in current PR to avoid conflict with upstream main branch. +// Solution proposal: define a ResourceType in lower level that can be used both in scaffold and project package. 
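A minimal sketch of the connection-marker round trip implemented above (a standalone re-implementation for illustration, assuming the $resource.connection:<resourceType>:<resourceInfoType> format; the real helpers are ToResourceConnectionEnv and toResourceConnectionInfo in the scaffold package):

package main

import (
	"fmt"
	"strings"
)

const connPrefix = "$resource.connection"

// encodeMarker builds the marker stored in ServiceSpec.Envs and later
// translated by ToBicepEnv.
func encodeMarker(resourceType, infoType string) string {
	return fmt.Sprintf("%s:%s:%s", connPrefix, resourceType, infoType)
}

// decodeMarker splits a marker back into resource type and info type,
// returning ok=false for anything that is not a well-formed marker.
func decodeMarker(marker string) (resourceType, infoType string, ok bool) {
	parts := strings.Split(marker, ":")
	if len(parts) != 3 || parts[0] != connPrefix || parts[1] == "" || parts[2] == "" {
		return "", "", false
	}
	return parts[1], parts[2], true
}

func main() {
	m := encodeMarker("db.postgres", "password")
	fmt.Println(m) // $resource.connection:db.postgres:password
	rt, it, ok := decodeMarker(m)
	fmt.Println(rt, it, ok) // db.postgres password true
}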
+ +type ResourceType string + +const ( + ResourceTypeDbRedis ResourceType = "db.redis" + ResourceTypeDbPostgres ResourceType = "db.postgres" + ResourceTypeDbMySQL ResourceType = "db.mysql" + ResourceTypeDbMongo ResourceType = "db.mongo" + ResourceTypeDbCosmos ResourceType = "db.cosmos" + ResourceTypeHostContainerApp ResourceType = "host.containerapp" + ResourceTypeOpenAiModel ResourceType = "ai.openai.model" + ResourceTypeMessagingServiceBus ResourceType = "messaging.servicebus" + ResourceTypeMessagingEventHubs ResourceType = "messaging.eventhubs" + ResourceTypeMessagingKafka ResourceType = "messaging.kafka" + ResourceTypeStorage ResourceType = "storage" +) + +type ResourceInfoType string + +const ( + ResourceInfoTypeHost ResourceInfoType = "host" + ResourceInfoTypePort ResourceInfoType = "port" + ResourceInfoTypeEndpoint ResourceInfoType = "endpoint" + ResourceInfoTypeDatabaseName ResourceInfoType = "databaseName" + ResourceInfoTypeNamespace ResourceInfoType = "namespace" + ResourceInfoTypeAccountName ResourceInfoType = "accountName" + ResourceInfoTypeUsername ResourceInfoType = "username" + ResourceInfoTypePassword ResourceInfoType = "password" + ResourceInfoTypeUrl ResourceInfoType = "url" + ResourceInfoTypeJdbcUrl ResourceInfoType = "jdbcUrl" + ResourceInfoTypeConnectionString ResourceInfoType = "connectionString" +) + type Frontend struct { Backends []ServiceReference } @@ -84,10 +201,6 @@ type ServiceReference struct { Name string } -type DatabaseReference struct { - DatabaseName string -} - type AIModelReference struct { Name string } diff --git a/cli/azd/internal/scaffold/spec_test.go b/cli/azd/internal/scaffold/spec_test.go new file mode 100644 index 00000000000..34f69f07222 --- /dev/null +++ b/cli/azd/internal/scaffold/spec_test.go @@ -0,0 +1,94 @@ +package scaffold + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestToResourceConnectionEnv(t *testing.T) { + tests := []struct { + name string + inputResourceType ResourceType + inputResourceInfoType ResourceInfoType + want string + }{ + { + name: "mysql username", + inputResourceType: ResourceTypeDbMySQL, + inputResourceInfoType: ResourceInfoTypeUsername, + want: "$resource.connection:db.mysql:username", + }, + { + name: "postgres password", + inputResourceType: ResourceTypeDbPostgres, + inputResourceInfoType: ResourceInfoTypePassword, + want: "$resource.connection:db.postgres:password", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ToResourceConnectionEnv(tt.inputResourceType, tt.inputResourceInfoType) + assert.Equal(t, tt.want, actual) + }) + } +} + +func TestIsResourceConnectionEnv(t *testing.T) { + tests := []struct { + name string + input string + want bool + }{ + { + name: "valid", + input: "$resource.connection:db.postgres:password", + want: true, + }, + { + name: "invalid", + input: "$resource.connection:db.postgres:", + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isResourceConnectionEnv(tt.input) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestToResourceConnectionInfo(t *testing.T) { + tests := []struct { + name string + input string + wantResourceType ResourceType + wantResourceInfoType ResourceInfoType + }{ + { + name: "invalid input", + input: "$resource.connection:db.mysql::username", + wantResourceType: "", + wantResourceInfoType: "", + }, + { + name: "mysql username", + input: "$resource.connection:db.mysql:username", + wantResourceType: ResourceTypeDbMySQL, + wantResourceInfoType: 
ResourceInfoTypeUsername, + }, + { + name: "postgres password", + input: "$resource.connection:db.postgres:password", + wantResourceType: ResourceTypeDbPostgres, + wantResourceInfoType: ResourceInfoTypePassword, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resourceType, resourceInfoType := toResourceConnectionInfo(tt.input) + assert.Equal(t, tt.wantResourceType, resourceType) + assert.Equal(t, tt.wantResourceInfoType, resourceInfoType) + }) + } +} diff --git a/cli/azd/internal/tracing/fields/fields.go b/cli/azd/internal/tracing/fields/fields.go index 52562e181c6..6b3bf726609 100644 --- a/cli/azd/internal/tracing/fields/fields.go +++ b/cli/azd/internal/tracing/fields/fields.go @@ -240,8 +240,9 @@ const ( const ( InitMethod = attribute.Key("init.method") - AppInitDetectedDatabase = attribute.Key("appinit.detected.databases") - AppInitDetectedServices = attribute.Key("appinit.detected.services") + AppInitDetectedDatabase = attribute.Key("appinit.detected.databases") + AppInitDetectedServices = attribute.Key("appinit.detected.services") + AppInitDetectedAzureDeps = attribute.Key("appinit.detected.azuredeps") AppInitConfirmedDatabases = attribute.Key("appinit.confirmed.databases") AppInitConfirmedServices = attribute.Key("appinit.confirmed.services") @@ -249,6 +250,9 @@ const ( AppInitModifyAddCount = attribute.Key("appinit.modify_add.count") AppInitModifyRemoveCount = attribute.Key("appinit.modify_remove.count") + // AppInitJavaDetect indicates if java detector has started or finished + AppInitJavaDetect = attribute.Key("appinit.java.detect") + // The last step recorded during the app init process. AppInitLastStep = attribute.Key("appinit.lastStep") ) diff --git a/cli/azd/pkg/pipeline/pipeline_manager_test.go b/cli/azd/pkg/pipeline/pipeline_manager_test.go index 6396a37f925..945a99726e7 100644 --- a/cli/azd/pkg/pipeline/pipeline_manager_test.go +++ b/cli/azd/pkg/pipeline/pipeline_manager_test.go @@ -773,7 +773,8 @@ func createPipelineManager( mockContext.Console, args, mockContext.Container, - project.NewImportManager(project.NewDotNetImporter(nil, nil, nil, nil, mockContext.AlphaFeaturesManager)), + project.NewImportManager( + project.NewDotNetImporter(nil, nil, nil, nil, mockContext.AlphaFeaturesManager)), &mockUserConfigManager{}, ) } diff --git a/cli/azd/pkg/project/importer.go b/cli/azd/pkg/project/importer.go index 26fbde3a07e..3494d76b81c 100644 --- a/cli/azd/pkg/project/importer.go +++ b/cli/azd/pkg/project/importer.go @@ -167,7 +167,7 @@ func (im *ImportManager) ProjectInfrastructure(ctx context.Context, projectConfi composeEnabled := im.dotNetImporter.alphaFeatureManager.IsEnabled(featureCompose) if composeEnabled && len(projectConfig.Resources) > 0 { - return tempInfra(ctx, projectConfig) + return tempInfra(ctx, projectConfig, im.dotNetImporter.console) } if !composeEnabled && len(projectConfig.Resources) > 0 { @@ -209,7 +209,7 @@ func (im *ImportManager) SynthAllInfrastructure(ctx context.Context, projectConf composeEnabled := im.dotNetImporter.alphaFeatureManager.IsEnabled(featureCompose) if composeEnabled && len(projectConfig.Resources) > 0 { - return infraFsForProject(ctx, projectConfig) + return infraFsForProject(ctx, projectConfig, im.dotNetImporter.console) } if !composeEnabled && len(projectConfig.Resources) > 0 { diff --git a/cli/azd/pkg/project/importer_test.go b/cli/azd/pkg/project/importer_test.go index 168e5c93261..35f76d9b7e3 100644 --- a/cli/azd/pkg/project/importer_test.go +++ b/cli/azd/pkg/project/importer_test.go @@ -392,10 +392,13 
@@ resources: - api postgresdb: type: db.postgres + authType: PASSWORD mongodb: type: db.mongo + authType: USER_ASSIGNED_MANAGED_IDENTITY redis: type: db.redis + authType: PASSWORD ` func Test_ImportManager_ProjectInfrastructure_FromResources(t *testing.T) { @@ -405,11 +408,15 @@ func Test_ImportManager_ProjectInfrastructure_FromResources(t *testing.T) { im := &ImportManager{ dotNetImporter: &DotNetImporter{ alphaFeatureManager: alpha.NewFeaturesManagerWithConfig(config.NewEmptyConfig()), + console: mocks.NewMockContext(context.Background()).Console, }, } prjConfig := &ProjectConfig{} err := yaml.Unmarshal([]byte(prjWithResources), prjConfig) + for key, res := range prjConfig.Resources { + res.Name = key + } require.NoError(t, err) infra, err := im.ProjectInfrastructure(context.Background(), prjConfig) @@ -436,12 +443,16 @@ func TestImportManager_SynthAllInfrastructure_FromResources(t *testing.T) { im := &ImportManager{ dotNetImporter: &DotNetImporter{ alphaFeatureManager: alpha.NewFeaturesManagerWithConfig(config.NewEmptyConfig()), + console: mocks.NewMockContext(context.Background()).Console, }, } prjConfig := &ProjectConfig{} err := yaml.Unmarshal([]byte(prjWithResources), prjConfig) require.NoError(t, err) + for key, res := range prjConfig.Resources { + res.Name = key + } projectFs, err := im.SynthAllInfrastructure(context.Background(), prjConfig) require.NoError(t, err) diff --git a/cli/azd/pkg/project/project.go b/cli/azd/pkg/project/project.go index ab2b66fd37a..31a38cd04a8 100644 --- a/cli/azd/pkg/project/project.go +++ b/cli/azd/pkg/project/project.go @@ -21,6 +21,12 @@ import ( "github.com/braydonk/yaml" ) +const ( + //nolint:lll + // todo(haozhan): update this line for sjad private preview, need to revert it when merge into azure-dev/main branch + projectSchemaAnnotation = "# yaml-language-server: $schema=https://raw.githubusercontent.com/azure-javaee/azure-dev/feature/sjad/schemas/alpha/azure.yaml.json" +) + func New(ctx context.Context, projectFilePath string, projectName string) (*ProjectConfig, error) { newProject := &ProjectConfig{ Name: projectName, @@ -275,18 +281,25 @@ func Save(ctx context.Context, projectConfig *ProjectConfig, projectFilePath str copy.Services[name] = &svcCopy } + for name, resource := range projectConfig.Resources { + resourceCopy := *resource + resourceCopy.Project = © + + copy.Resources[name] = &resourceCopy + } + projectBytes, err := yaml.Marshal(copy) if err != nil { return fmt.Errorf("marshalling project yaml: %w", err) } - version := "v1.0" + version := "alpha" if projectConfig.MetaSchemaVersion != "" { version = projectConfig.MetaSchemaVersion } annotation := fmt.Sprintf( - "# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/%s/azure.yaml.json", + "# yaml-language-server: $schema=https://raw.githubusercontent.com/azure-javaee/azure-dev/feature/sjad/schemas/%s/azure.yaml.json", version) projectFileContents := bytes.NewBufferString(annotation + "\n\n") _, err = projectFileContents.Write(projectBytes) diff --git a/cli/azd/pkg/project/resources.go b/cli/azd/pkg/project/resources.go index 9c1494ec15e..1eb336d6b1b 100644 --- a/cli/azd/pkg/project/resources.go +++ b/cli/azd/pkg/project/resources.go @@ -5,6 +5,7 @@ package project import ( "fmt" + "github.com/azure/azure-dev/cli/azd/internal" "github.com/braydonk/yaml" ) @@ -22,11 +23,17 @@ func AllResourceTypes() []ResourceType { } const ( - ResourceTypeDbRedis ResourceType = "db.redis" - ResourceTypeDbPostgres ResourceType = "db.postgres" - 
ResourceTypeDbMongo ResourceType = "db.mongo" - ResourceTypeHostContainerApp ResourceType = "host.containerapp" - ResourceTypeOpenAiModel ResourceType = "ai.openai.model" + ResourceTypeDbRedis ResourceType = "db.redis" + ResourceTypeDbPostgres ResourceType = "db.postgres" + ResourceTypeDbMySQL ResourceType = "db.mysql" + ResourceTypeDbMongo ResourceType = "db.mongo" + ResourceTypeDbCosmos ResourceType = "db.cosmos" + ResourceTypeHostContainerApp ResourceType = "host.containerapp" + ResourceTypeOpenAiModel ResourceType = "ai.openai.model" + ResourceTypeMessagingServiceBus ResourceType = "messaging.servicebus" + ResourceTypeMessagingEventHubs ResourceType = "messaging.eventhubs" + ResourceTypeMessagingKafka ResourceType = "messaging.kafka" + ResourceTypeStorage ResourceType = "storage" ) func (r ResourceType) String() string { @@ -35,12 +42,24 @@ func (r ResourceType) String() string { return "Redis" case ResourceTypeDbPostgres: return "PostgreSQL" + case ResourceTypeDbMySQL: + return "MySQL" case ResourceTypeDbMongo: return "MongoDB" + case ResourceTypeDbCosmos: + return "CosmosDB" case ResourceTypeHostContainerApp: return "Container App" case ResourceTypeOpenAiModel: return "Open AI Model" + case ResourceTypeMessagingServiceBus: + return "Service Bus" + case ResourceTypeMessagingEventHubs: + return "Event Hubs" + case ResourceTypeMessagingKafka: + return "Kafka" + case ResourceTypeStorage: + return "Storage Account" } return "" @@ -89,6 +108,46 @@ func (r *ResourceConfig) MarshalYAML() (interface{}, error) { if err != nil { return nil, err } + case ResourceTypeDbPostgres: + err := marshalRawProps(raw.Props.(PostgresProps)) + if err != nil { + return nil, err + } + case ResourceTypeDbMySQL: + err := marshalRawProps(raw.Props.(MySQLProps)) + if err != nil { + return nil, err + } + case ResourceTypeDbMongo: + err := marshalRawProps(raw.Props.(MongoDBProps)) + if err != nil { + return nil, err + } + case ResourceTypeDbCosmos: + err := marshalRawProps(raw.Props.(CosmosDBProps)) + if err != nil { + return nil, err + } + case ResourceTypeMessagingServiceBus: + err := marshalRawProps(raw.Props.(ServiceBusProps)) + if err != nil { + return nil, err + } + case ResourceTypeMessagingEventHubs: + err := marshalRawProps(raw.Props.(EventHubsProps)) + if err != nil { + return nil, err + } + case ResourceTypeMessagingKafka: + err := marshalRawProps(raw.Props.(KafkaProps)) + if err != nil { + return nil, err + } + case ResourceTypeStorage: + err := marshalRawProps(raw.Props.(StorageProps)) + if err != nil { + return nil, err + } } return raw, nil @@ -128,6 +187,54 @@ func (r *ResourceConfig) UnmarshalYAML(value *yaml.Node) error { return err } raw.Props = cap + case ResourceTypeDbMySQL: + mp := MySQLProps{} + if err := unmarshalProps(&mp); err != nil { + return err + } + raw.Props = mp + case ResourceTypeDbPostgres: + pp := PostgresProps{} + if err := unmarshalProps(&pp); err != nil { + return err + } + raw.Props = pp + case ResourceTypeDbMongo: + mp := MongoDBProps{} + if err := unmarshalProps(&mp); err != nil { + return err + } + raw.Props = mp + case ResourceTypeDbCosmos: + cp := CosmosDBProps{} + if err := unmarshalProps(&cp); err != nil { + return err + } + raw.Props = cp + case ResourceTypeMessagingServiceBus: + sb := ServiceBusProps{} + if err := unmarshalProps(&sb); err != nil { + return err + } + raw.Props = sb + case ResourceTypeMessagingEventHubs: + eh := EventHubsProps{} + if err := unmarshalProps(&eh); err != nil { + return err + } + raw.Props = eh + case ResourceTypeMessagingKafka: + kp := 
KafkaProps{} + if err := unmarshalProps(&kp); err != nil { + return err + } + raw.Props = kp + case ResourceTypeStorage: + sp := StorageProps{} + if err := unmarshalProps(&sp); err != nil { + return err + } + raw.Props = sp } *r = ResourceConfig(raw) @@ -155,3 +262,49 @@ type AIModelPropsModel struct { Name string `yaml:"name,omitempty"` Version string `yaml:"version,omitempty"` } + +type MySQLProps struct { + DatabaseName string `yaml:"databaseName,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` +} + +type PostgresProps struct { + DatabaseName string `yaml:"databaseName,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` +} + +type MongoDBProps struct { + DatabaseName string `yaml:"databaseName,omitempty"` +} + +type CosmosDBProps struct { + Containers []CosmosDBContainerProps `yaml:"containers,omitempty"` + DatabaseName string `yaml:"databaseName,omitempty"` +} + +type CosmosDBContainerProps struct { + ContainerName string `yaml:"containerName,omitempty"` + PartitionKeyPaths []string `yaml:"partitionKeyPaths,omitempty"` +} + +type ServiceBusProps struct { + Queues []string `yaml:"queues,omitempty"` + IsJms bool `yaml:"isJms,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` +} + +type EventHubsProps struct { + EventHubNames []string `yaml:"eventHubNames,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` +} + +type KafkaProps struct { + Topics []string `yaml:"topics,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` + SpringBootVersion string `yaml:"springBootVersion,omitempty"` +} + +type StorageProps struct { + Containers []string `yaml:"containers,omitempty"` + AuthType internal.AuthType `yaml:"authType,omitempty"` +} diff --git a/cli/azd/pkg/project/scaffold_gen.go b/cli/azd/pkg/project/scaffold_gen.go index 120f1c63211..b86b49e54e5 100644 --- a/cli/azd/pkg/project/scaffold_gen.go +++ b/cli/azd/pkg/project/scaffold_gen.go @@ -6,6 +6,8 @@ package project import ( "context" "fmt" + "github.com/azure/azure-dev/cli/azd/internal" + "github.com/azure/azure-dev/cli/azd/pkg/input" "io/fs" "os" "path/filepath" @@ -19,13 +21,13 @@ import ( ) // Generates the in-memory contents of an `infra` directory. -func infraFs(_ context.Context, prjConfig *ProjectConfig) (fs.FS, error) { +func infraFs(cxt context.Context, prjConfig *ProjectConfig, console input.Console) (fs.FS, error) { t, err := scaffold.Load() if err != nil { return nil, fmt.Errorf("loading scaffold templates: %w", err) } - infraSpec, err := infraSpec(prjConfig) + infraSpec, err := infraSpec(prjConfig, console, cxt) if err != nil { return nil, fmt.Errorf("generating infrastructure spec: %w", err) } @@ -41,13 +43,14 @@ func infraFs(_ context.Context, prjConfig *ProjectConfig) (fs.FS, error) { // Returns the infrastructure configuration that points to a temporary, generated `infra` directory on the filesystem. func tempInfra( ctx context.Context, - prjConfig *ProjectConfig) (*Infra, error) { + prjConfig *ProjectConfig, + console input.Console) (*Infra, error) { tmpDir, err := os.MkdirTemp("", "azd-infra") if err != nil { return nil, fmt.Errorf("creating temporary directory: %w", err) } - files, err := infraFs(ctx, prjConfig) + files, err := infraFs(ctx, prjConfig, console) if err != nil { return nil, err } @@ -89,8 +92,9 @@ func tempInfra( // Generates the filesystem of all infrastructure files to be placed, rooted at the project directory. // The content only includes `./infra` currently. 
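For reference, a small hedged sketch of how typed props such as PostgresProps round-trip through the databaseName/authType YAML tags shown above (gopkg.in/yaml.v3 is used here as a stand-in for the braydonk/yaml fork that azd imports; the struct is an illustrative copy, not the real type):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3" // stand-in for the braydonk/yaml fork used by azd; same Marshal API
)

// postgresProps is an illustrative copy of the PostgresProps shape above.
type postgresProps struct {
	DatabaseName string `yaml:"databaseName,omitempty"`
	AuthType     string `yaml:"authType,omitempty"`
}

func main() {
	out, err := yaml.Marshal(postgresProps{
		DatabaseName: "appdb",
		AuthType:     "USER_ASSIGNED_MANAGED_IDENTITY",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// databaseName: appdb
	// authType: USER_ASSIGNED_MANAGED_IDENTITY
}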
-func infraFsForProject(ctx context.Context, prjConfig *ProjectConfig) (fs.FS, error) { - infraFS, err := infraFs(ctx, prjConfig) +func infraFsForProject(ctx context.Context, prjConfig *ProjectConfig, + console input.Console) (fs.FS, error) { + infraFS, err := infraFs(ctx, prjConfig, console) if err != nil { return nil, err } @@ -130,53 +134,90 @@ func infraFsForProject(ctx context.Context, prjConfig *ProjectConfig) (fs.FS, er return generatedFS, nil } -func infraSpec(projectConfig *ProjectConfig) (*scaffold.InfraSpec, error) { +func infraSpec(projectConfig *ProjectConfig, + console input.Console, ctx context.Context) (*scaffold.InfraSpec, error) { infraSpec := scaffold.InfraSpec{} - // backends -> frontends - backendMapping := map[string]string{} - - for _, res := range projectConfig.Resources { - switch res.Type { + for _, resource := range projectConfig.Resources { + switch resource.Type { case ResourceTypeDbRedis: infraSpec.DbRedis = &scaffold.DatabaseRedis{} case ResourceTypeDbMongo: infraSpec.DbCosmosMongo = &scaffold.DatabaseCosmosMongo{ - DatabaseName: res.Name, + DatabaseName: resource.Props.(MongoDBProps).DatabaseName, } case ResourceTypeDbPostgres: infraSpec.DbPostgres = &scaffold.DatabasePostgres{ - DatabaseName: res.Name, + DatabaseName: resource.Props.(PostgresProps).DatabaseName, DatabaseUser: "pgadmin", + AuthType: resource.Props.(PostgresProps).AuthType, + } + case ResourceTypeDbMySQL: + infraSpec.DbMySql = &scaffold.DatabaseMySql{ + DatabaseName: resource.Props.(MySQLProps).DatabaseName, + DatabaseUser: "mysqladmin", + AuthType: resource.Props.(MySQLProps).AuthType, + } + case ResourceTypeDbCosmos: + infraSpec.DbCosmos = &scaffold.DatabaseCosmosAccount{ + DatabaseName: resource.Props.(CosmosDBProps).DatabaseName, + } + containers := resource.Props.(CosmosDBProps).Containers + for _, container := range containers { + infraSpec.DbCosmos.Containers = append(infraSpec.DbCosmos.Containers, scaffold.CosmosSqlDatabaseContainer{ + ContainerName: container.ContainerName, + PartitionKeyPaths: container.PartitionKeyPaths, + }) + } + case ResourceTypeMessagingServiceBus: + props := resource.Props.(ServiceBusProps) + infraSpec.AzureServiceBus = &scaffold.AzureDepServiceBus{ + Queues: props.Queues, + AuthType: props.AuthType, + IsJms: props.IsJms, + } + case ResourceTypeMessagingEventHubs: + props := resource.Props.(EventHubsProps) + infraSpec.AzureEventHubs = &scaffold.AzureDepEventHubs{ + EventHubNames: props.EventHubNames, + AuthType: props.AuthType, + UseKafka: false, + } + case ResourceTypeMessagingKafka: + props := resource.Props.(KafkaProps) + infraSpec.AzureEventHubs = &scaffold.AzureDepEventHubs{ + EventHubNames: props.Topics, + AuthType: props.AuthType, + UseKafka: true, + SpringBootVersion: props.SpringBootVersion, + } + case ResourceTypeStorage: + props := resource.Props.(StorageProps) + infraSpec.AzureStorageAccount = &scaffold.AzureDepStorageAccount{ + ContainerNames: props.Containers, + AuthType: props.AuthType, } case ResourceTypeHostContainerApp: - svcSpec := scaffold.ServiceSpec{ - Name: res.Name, + serviceSpec := scaffold.ServiceSpec{ + Name: resource.Name, Port: -1, } - - err := mapContainerApp(res, &svcSpec, &infraSpec) - if err != nil { - return nil, err - } - - err = mapHostUses(res, &svcSpec, backendMapping, projectConfig) + err := handleContainerAppProps(resource, &serviceSpec, &infraSpec) if err != nil { return nil, err } - - infraSpec.Services = append(infraSpec.Services, svcSpec) + infraSpec.Services = append(infraSpec.Services, serviceSpec) case 
ResourceTypeOpenAiModel: - props := res.Props.(AIModelProps) + props := resource.Props.(AIModelProps) if len(props.Model.Name) == 0 { - return nil, fmt.Errorf("resources.%s.model is required", res.Name) + return nil, fmt.Errorf("resources.%s.model is required", resource.Name) } if len(props.Model.Version) == 0 { - return nil, fmt.Errorf("resources.%s.version is required", res.Name) + return nil, fmt.Errorf("resources.%s.version is required", resource.Name) } infraSpec.AIModels = append(infraSpec.AIModels, scaffold.AIModel{ - Name: res.Name, + Name: resource.Name, Model: scaffold.AIModelModel{ Name: props.Model.Name, Version: props.Model.Version, @@ -185,16 +226,14 @@ func infraSpec(projectConfig *ProjectConfig) (*scaffold.InfraSpec, error) { } } - // create reverse frontends -> backends mapping - for i := range infraSpec.Services { - svc := &infraSpec.Services[i] - if front, ok := backendMapping[svc.Name]; ok { - if svc.Backend == nil { - svc.Backend = &scaffold.Backend{} - } + err := mapUses(&infraSpec, projectConfig) + if err != nil { + return nil, err + } - svc.Backend.Frontends = append(svc.Backend.Frontends, scaffold.ServiceReference{Name: front}) - } + err = printEnvListAboutUses(&infraSpec, projectConfig, console, ctx) + if err != nil { + return nil, err } slices.SortFunc(infraSpec.Services, func(a, b scaffold.ServiceSpec) int { @@ -204,21 +243,194 @@ func infraSpec(projectConfig *ProjectConfig) (*scaffold.InfraSpec, error) { return &infraSpec, nil } -func mapContainerApp(res *ResourceConfig, svcSpec *scaffold.ServiceSpec, infraSpec *scaffold.InfraSpec) error { - props := res.Props.(ContainerAppProps) +func mapUses(infraSpec *scaffold.InfraSpec, projectConfig *ProjectConfig) error { + for i := range infraSpec.Services { + userSpec := &infraSpec.Services[i] + userResourceName := userSpec.Name + userResource, ok := projectConfig.Resources[userResourceName] + if !ok { + return fmt.Errorf("service (%s) exists, but there isn't a resource with that name", + userResourceName) + } + for _, usedResourceName := range userResource.Uses { + usedResource, ok := projectConfig.Resources[usedResourceName] + if !ok { + return fmt.Errorf("in azure.yaml, (%s) uses (%s), but (%s) doesn't exist", + userResourceName, usedResourceName, usedResourceName) + } + switch usedResource.Type { + case ResourceTypeDbPostgres: + userSpec.DbPostgres = infraSpec.DbPostgres + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeDbMySQL: + userSpec.DbMySql = infraSpec.DbMySql + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeDbRedis: + userSpec.DbRedis = infraSpec.DbRedis + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeDbMongo: + userSpec.DbCosmosMongo = infraSpec.DbCosmosMongo + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeDbCosmos: + userSpec.DbCosmos = infraSpec.DbCosmos + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeMessagingServiceBus: + userSpec.AzureServiceBus = infraSpec.AzureServiceBus + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case ResourceTypeMessagingEventHubs, ResourceTypeMessagingKafka: + userSpec.AzureEventHubs = infraSpec.AzureEventHubs + err := addUsageByEnv(infraSpec, userSpec, usedResource) + if err != nil { + return err + } + case 
+            case ResourceTypeStorage:
+                userSpec.AzureStorageAccount = infraSpec.AzureStorageAccount
+                err := addUsageByEnv(infraSpec, userSpec, usedResource)
+                if err != nil {
+                    return err
+                }
+            case ResourceTypeOpenAiModel:
+                userSpec.AIModels = append(userSpec.AIModels, scaffold.AIModelReference{Name: usedResource.Name})
+                err := addUsageByEnv(infraSpec, userSpec, usedResource)
+                if err != nil {
+                    return err
+                }
+            case ResourceTypeHostContainerApp:
+                err := fulfillFrontendBackend(userSpec, usedResource, infraSpec)
+                if err != nil {
+                    return err
+                }
+            default:
+                return fmt.Errorf("resource (%s) uses (%s), but the type of (%s) is (%s), which is unsupported",
+                    userResource.Name, usedResource.Name, usedResource.Name, usedResource.Type)
+            }
+        }
+    }
+    return nil
+}
+
+func getAuthType(infraSpec *scaffold.InfraSpec, resourceType ResourceType) (internal.AuthType, error) {
+    switch resourceType {
+    case ResourceTypeDbPostgres:
+        return infraSpec.DbPostgres.AuthType, nil
+    case ResourceTypeDbMySQL:
+        return infraSpec.DbMySql.AuthType, nil
+    case ResourceTypeDbRedis:
+        return internal.AuthTypePassword, nil
+    case ResourceTypeDbMongo,
+        ResourceTypeDbCosmos,
+        ResourceTypeOpenAiModel,
+        ResourceTypeHostContainerApp:
+        return internal.AuthTypeUserAssignedManagedIdentity, nil
+    case ResourceTypeMessagingServiceBus:
+        return infraSpec.AzureServiceBus.AuthType, nil
+    case ResourceTypeMessagingEventHubs, ResourceTypeMessagingKafka:
+        return infraSpec.AzureEventHubs.AuthType, nil
+    case ResourceTypeStorage:
+        return infraSpec.AzureStorageAccount.AuthType, nil
+    default:
+        return internal.AuthTypeUnspecified, fmt.Errorf("cannot get authType, resource type: %s", resourceType)
+    }
+}
+
+func addUsageByEnv(infraSpec *scaffold.InfraSpec, userSpec *scaffold.ServiceSpec, usedResource *ResourceConfig) error {
+    envs, err := getResourceConnectionEnvs(usedResource, infraSpec)
+    if err != nil {
+        return err
+    }
+    userSpec.Envs, err = mergeEnvWithDuplicationCheck(userSpec.Envs, envs)
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+func printEnvListAboutUses(infraSpec *scaffold.InfraSpec, projectConfig *ProjectConfig,
+    console input.Console, ctx context.Context) error {
+    for i := range infraSpec.Services {
+        userSpec := &infraSpec.Services[i]
+        userResourceName := userSpec.Name
+        userResource, ok := projectConfig.Resources[userResourceName]
+        if !ok {
+            return fmt.Errorf("service (%s) exists, but there isn't a resource with that name",
+                userResourceName)
+        }
+        for _, usedResourceName := range userResource.Uses {
+            usedResource, ok := projectConfig.Resources[usedResourceName]
+            if !ok {
+                return fmt.Errorf("in azure.yaml, (%s) uses (%s), but (%s) doesn't exist",
+                    userResourceName, usedResourceName, usedResourceName)
+            }
+            console.Message(ctx, fmt.Sprintf("\nInformation about environment variables:\n"+
+                "In azure.yaml, '%s' uses '%s'.\n"+
+                "The 'uses' relationship is implemented by environment variables.\n"+
+                "Please make sure your application uses the right environment variables.\n"+
+                "Here is the list of environment variables: ",
+                userResourceName, usedResourceName))
+            switch usedResource.Type {
+            case ResourceTypeDbPostgres, // these types expose their connection info through the environment variables listed below
+                ResourceTypeDbMySQL,
+                ResourceTypeDbRedis,
+                ResourceTypeDbMongo,
+                ResourceTypeDbCosmos,
+                ResourceTypeMessagingServiceBus,
+                ResourceTypeMessagingEventHubs,
+                ResourceTypeMessagingKafka,
+                ResourceTypeStorage:
+                variables, err := getResourceConnectionEnvs(usedResource, infraSpec)
+                if err != nil {
+                    return err
+                }
+                for _, variable := range variables {
+                    console.Message(ctx, fmt.Sprintf(" %s=xxx", variable.Name))
+                }
+            case ResourceTypeHostContainerApp:
+                printHintsAboutUseHostContainerApp(userResourceName, usedResourceName, console, ctx)
+            default:
+                return fmt.Errorf("resource (%s) uses (%s), but the type of (%s) is (%s), "+
+                    "which doesn't add the necessary environment variables",
+                    userResource.Name, usedResource.Name, usedResource.Name, usedResource.Type)
+            }
+            console.Message(ctx, "\n")
+        }
+    }
+    return nil
+}
+
+func handleContainerAppProps(
+    resourceConfig *ResourceConfig, serviceSpec *scaffold.ServiceSpec, infraSpec *scaffold.InfraSpec) error {
+    props := resourceConfig.Props.(ContainerAppProps)
     for _, envVar := range props.Env {
         if len(envVar.Value) == 0 && len(envVar.Secret) == 0 {
             return fmt.Errorf(
                 "environment variable %s for host %s is invalid: both value and secret are empty",
                 envVar.Name,
-                res.Name)
+                resourceConfig.Name)
         }
 
         if len(envVar.Value) > 0 && len(envVar.Secret) > 0 {
             return fmt.Errorf(
                 "environment variable %s for host %s is invalid: both value and secret are set",
                 envVar.Name,
-                res.Name)
+                resourceConfig.Name)
         }
 
         isSecret := len(envVar.Secret) > 0
@@ -233,49 +445,19 @@ func mapContainerApp(res *ResourceConfig, svcSpec *scaffold.ServiceSpec, infraSp
         // Here, DB_HOST is not a secret, but DB_SECRET is. And yet, DB_HOST will be marked as a secret.
         // This is a limitation of the current implementation, but it's safer to mark both as secrets above.
         evaluatedValue := genBicepParamsFromEnvSubst(value, isSecret, infraSpec)
-        svcSpec.Env[envVar.Name] = evaluatedValue
+        err := addNewEnvironmentVariable(serviceSpec, envVar.Name, evaluatedValue)
+        if err != nil {
+            return err
+        }
     }
 
     port := props.Port
     if port < 1 || port > 65535 {
-        return fmt.Errorf("port value %d for host %s must be between 1 and 65535", port, res.Name)
-    }
-
-    svcSpec.Port = port
-    return nil
-}
-
-func mapHostUses(
-    res *ResourceConfig,
-    svcSpec *scaffold.ServiceSpec,
-    backendMapping map[string]string,
-    prj *ProjectConfig) error {
-    for _, use := range res.Uses {
-        useRes, ok := prj.Resources[use]
-        if !ok {
-            return fmt.Errorf("resource %s uses %s, which does not exist", res.Name, use)
-        }
-
-        switch useRes.Type {
-        case ResourceTypeDbMongo:
-            svcSpec.DbCosmosMongo = &scaffold.DatabaseReference{DatabaseName: useRes.Name}
-        case ResourceTypeDbPostgres:
-            svcSpec.DbPostgres = &scaffold.DatabaseReference{DatabaseName: useRes.Name}
-        case ResourceTypeDbRedis:
-            svcSpec.DbRedis = &scaffold.DatabaseReference{DatabaseName: useRes.Name}
-        case ResourceTypeHostContainerApp:
-            if svcSpec.Frontend == nil {
-                svcSpec.Frontend = &scaffold.Frontend{}
-            }
-
-            svcSpec.Frontend.Backends = append(svcSpec.Frontend.Backends,
-                scaffold.ServiceReference{Name: use})
-            backendMapping[use] = res.Name // record the backend -> frontend mapping
-        case ResourceTypeOpenAiModel:
-            svcSpec.AIModels = append(svcSpec.AIModels, scaffold.AIModelReference{Name: use})
-        }
+        return fmt.Errorf("port value %d for host %s must be between 1 and 65535", port, resourceConfig.Name)
     }
+    serviceSpec.Port = port
     return nil
 }
 
@@ -311,6 +493,7 @@ func setParameter(spec *scaffold.InfraSpec, name string, value string, isSecret
 //
 // If the string is a literal, it is returned as is.
 // If isSecret is true, the parameter is marked as a secret.
+// The returned value is a string; any expressions inside it are wrapped with "${}".
 func genBicepParamsFromEnvSubst(
     s string,
     isSecret bool,
@@ -325,16 +508,16 @@ func genBicepParamsFromEnvSubst(
     var result string
     if len(names) == 0 {
-        // literal string with no expressions, quote the value as a Bicep string
-        result = "'" + s + "'"
+        // literal string with no expressions
+        result = s
     } else if len(names) == 1 {
         // single expression, return the bicep parameter name to reference the expression
-        result = scaffold.BicepName(names[0])
+        result = "${" + scaffold.BicepName(names[0]) + "}"
     } else {
         // multiple expressions
         // construct the string with all expressions replaced by parameter references as a Bicep interpolated string
         previous := 0
-        result = "'"
+        result = ""
         for i, loc := range locations {
             // replace each expression with references by variable name
             result += s[previous:loc.start]
@@ -343,8 +526,47 @@ func genBicepParamsFromEnvSubst(
             result += "}"
             previous = loc.stop + 1
         }
-        result += "'"
     }
 
     return result
 }
+
+func fulfillFrontendBackend(
+    userSpec *scaffold.ServiceSpec, usedResource *ResourceConfig, infraSpec *scaffold.InfraSpec) error {
+    if userSpec.Frontend == nil {
+        userSpec.Frontend = &scaffold.Frontend{}
+    }
+    userSpec.Frontend.Backends =
+        append(userSpec.Frontend.Backends, scaffold.ServiceReference{Name: usedResource.Name})
+
+    usedSpec := getServiceSpecByName(infraSpec, usedResource.Name)
+    if usedSpec == nil {
+        return fmt.Errorf("'%s' uses '%s', but %s doesn't exist", userSpec.Name, usedResource.Name, usedResource.Name)
+    }
+    if usedSpec.Backend == nil {
+        usedSpec.Backend = &scaffold.Backend{}
+    }
+    usedSpec.Backend.Frontends =
+        append(usedSpec.Backend.Frontends, scaffold.ServiceReference{Name: userSpec.Name})
+    return nil
+}
+
+func getServiceSpecByName(infraSpec *scaffold.InfraSpec, name string) *scaffold.ServiceSpec {
+    for i := range infraSpec.Services {
+        if infraSpec.Services[i].Name == name {
+            return &infraSpec.Services[i]
+        }
+    }
+    return nil
+}
+
+func printHintsAboutUseHostContainerApp(userResourceName string, usedResourceName string,
+    console input.Console, ctx context.Context) {
+    if console == nil {
+        return
+    }
+    console.Message(ctx, fmt.Sprintf("Environment variables in %s:", userResourceName))
+    console.Message(ctx, fmt.Sprintf("%s_BASE_URL=xxx", strings.ToUpper(usedResourceName)))
+    console.Message(ctx, fmt.Sprintf("Environment variables in %s:", usedResourceName))
+    console.Message(ctx, fmt.Sprintf("%s_BASE_URL=xxx", strings.ToUpper(userResourceName)))
+}
diff --git a/cli/azd/pkg/project/scaffold_gen_environment_variables.go b/cli/azd/pkg/project/scaffold_gen_environment_variables.go
new file mode 100644
index 00000000000..f02ea084f6c
--- /dev/null
+++ b/cli/azd/pkg/project/scaffold_gen_environment_variables.go
@@ -0,0 +1,552 @@
+package project
+
+import (
+    "fmt"
+    "github.com/azure/azure-dev/cli/azd/internal"
+    "github.com/azure/azure-dev/cli/azd/internal/scaffold"
+    "strings"
+)
+
+func getResourceConnectionEnvs(usedResource *ResourceConfig,
+    infraSpec *scaffold.InfraSpec) ([]scaffold.Env, error) {
+    resourceType := usedResource.Type
+    authType, err := getAuthType(infraSpec, usedResource.Type)
+    if err != nil {
+        return []scaffold.Env{}, err
+    }
+    switch resourceType {
+    case ResourceTypeDbPostgres:
+        switch authType {
+        case internal.AuthTypePassword:
+            return []scaffold.Env{
+                {
+                    Name:  "POSTGRES_USERNAME",
+                    Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeUsername),
+                },
+                {
+                    Name:  "POSTGRES_PASSWORD",
+                    Value:
scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypePassword), + }, + { + Name: "POSTGRES_HOST", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeHost), + }, + { + Name: "POSTGRES_DATABASE", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeDatabaseName), + }, + { + Name: "POSTGRES_PORT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypePort), + }, + { + Name: "POSTGRES_URL", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeUrl), + }, + { + Name: "spring.datasource.url", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeJdbcUrl), + }, + { + Name: "spring.datasource.username", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "spring.datasource.password", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypePassword), + }, + }, nil + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "POSTGRES_USERNAME", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "POSTGRES_HOST", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeHost), + }, + { + Name: "POSTGRES_DATABASE", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeDatabaseName), + }, + { + Name: "POSTGRES_PORT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypePort), + }, + { + Name: "spring.datasource.url", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeJdbcUrl), + }, + { + Name: "spring.datasource.username", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbPostgres, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "spring.datasource.azure.passwordless-enabled", + Value: "true", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + case ResourceTypeDbMySQL: + switch authType { + case internal.AuthTypePassword: + return []scaffold.Env{ + { + Name: "MYSQL_USERNAME", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "MYSQL_PASSWORD", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypePassword), + }, + { + Name: "MYSQL_HOST", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeHost), + }, + { + Name: "MYSQL_DATABASE", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeDatabaseName), + }, + { + Name: "MYSQL_PORT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypePort), + }, + { + Name: "MYSQL_URL", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeUrl), + }, + { + Name: "spring.datasource.url", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeJdbcUrl), + }, + { + Name: "spring.datasource.username", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeUsername), 
+ }, + { + Name: "spring.datasource.password", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypePassword), + }, + }, nil + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "MYSQL_USERNAME", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "MYSQL_HOST", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeHost), + }, + { + Name: "MYSQL_PORT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypePort), + }, + { + Name: "MYSQL_DATABASE", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeDatabaseName), + }, + { + Name: "spring.datasource.url", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeJdbcUrl), + }, + { + Name: "spring.datasource.username", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMySQL, scaffold.ResourceInfoTypeUsername), + }, + { + Name: "spring.datasource.azure.passwordless-enabled", + Value: "true", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + case ResourceTypeDbRedis: + switch authType { + case internal.AuthTypePassword: + return []scaffold.Env{ + { + Name: "REDIS_HOST", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypeHost), + }, + { + Name: "REDIS_PORT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypePort), + }, + { + Name: "REDIS_ENDPOINT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypeEndpoint), + }, + { + Name: "REDIS_URL", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypeUrl), + }, + { + Name: "REDIS_PASSWORD", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypePassword), + }, + { + Name: "spring.data.redis.url", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbRedis, scaffold.ResourceInfoTypeUrl), + }, + }, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + case ResourceTypeDbMongo: + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "MONGODB_URL", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMongo, scaffold.ResourceInfoTypeUrl), + }, + { + Name: "spring.data.mongodb.uri", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMongo, scaffold.ResourceInfoTypeUrl), + }, + { + Name: "spring.data.mongodb.database", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbMongo, scaffold.ResourceInfoTypeDatabaseName), + }, + }, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + case ResourceTypeDbCosmos: + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "spring.cloud.azure.cosmos.endpoint", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbCosmos, scaffold.ResourceInfoTypeEndpoint), + }, + { + Name: "spring.cloud.azure.cosmos.database", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeDbCosmos, scaffold.ResourceInfoTypeDatabaseName), + }, + }, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, 
authType) + } + case ResourceTypeMessagingServiceBus: + if infraSpec.AzureServiceBus.IsJms { + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "spring.jms.servicebus.pricing-tier", + Value: "premium", + }, + { + Name: "spring.jms.servicebus.passwordless-enabled", + Value: "true", + }, + { + Name: "spring.jms.servicebus.credential.managed-identity-enabled", + Value: "true", + }, + { + Name: "spring.jms.servicebus.credential.client-id", + Value: scaffold.PlaceHolderForServiceIdentityClientId(), + }, + { + Name: "spring.jms.servicebus.namespace", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingServiceBus, scaffold.ResourceInfoTypeNamespace), + }, + { + Name: "spring.jms.servicebus.connection-string", + Value: "", + }, + }, nil + case internal.AuthTypeConnectionString: + return []scaffold.Env{ + { + Name: "spring.jms.servicebus.pricing-tier", + Value: "premium", + }, + { + Name: "spring.jms.servicebus.connection-string", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingServiceBus, scaffold.ResourceInfoTypeConnectionString), + }, + { + Name: "spring.jms.servicebus.passwordless-enabled", + Value: "false", + }, + { + Name: "spring.jms.servicebus.credential.managed-identity-enabled", + Value: "false", + }, + { + Name: "spring.jms.servicebus.credential.client-id", + Value: "", + }, + { + Name: "spring.jms.servicebus.namespace", + Value: "", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } + } else { + // service bus, not jms + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + // Not add this: spring.cloud.azure.servicebus.connection-string = "" + // because of this: https://github.com/Azure/azure-sdk-for-java/issues/42880 + { + Name: "spring.cloud.azure.servicebus.credential.managed-identity-enabled", + Value: "true", + }, + { + Name: "spring.cloud.azure.servicebus.credential.client-id", + Value: scaffold.PlaceHolderForServiceIdentityClientId(), + }, + { + Name: "spring.cloud.azure.servicebus.namespace", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingServiceBus, scaffold.ResourceInfoTypeNamespace), + }, + }, nil + case internal.AuthTypeConnectionString: + return []scaffold.Env{ + { + Name: "spring.cloud.azure.servicebus.namespace", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingServiceBus, scaffold.ResourceInfoTypeNamespace), + }, + { + Name: "spring.cloud.azure.servicebus.connection-string", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingServiceBus, scaffold.ResourceInfoTypeConnectionString), + }, + { + Name: "spring.cloud.azure.servicebus.credential.managed-identity-enabled", + Value: "false", + }, + { + Name: "spring.cloud.azure.servicebus.credential.client-id", + Value: "", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } + } + case ResourceTypeMessagingKafka: + // event hubs for kafka + var springBootVersionDecidedInformation []scaffold.Env + if strings.HasPrefix(infraSpec.AzureEventHubs.SpringBootVersion, "2.") { + springBootVersionDecidedInformation = []scaffold.Env{ + { + Name: "spring.cloud.stream.binders.kafka.environment.spring.main.sources", + Value: "com.azure.spring.cloud.autoconfigure.eventhubs.kafka.AzureEventHubsKafkaAutoConfiguration", + }, + } + } else { + springBootVersionDecidedInformation = []scaffold.Env{ + { + Name: 
"spring.cloud.stream.binders.kafka.environment.spring.main.sources", + Value: "com.azure.spring.cloud.autoconfigure.implementation.eventhubs.kafka.AzureEventHubsKafkaAutoConfiguration", + }, + } + } + var commonInformation []scaffold.Env + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + commonInformation = []scaffold.Env{ + // Not add this: spring.cloud.azure.eventhubs.connection-string = "" + // because of this: https://github.com/Azure/azure-sdk-for-java/issues/42880 + { + Name: "spring.cloud.stream.kafka.binder.brokers", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingKafka, scaffold.ResourceInfoTypeEndpoint), + }, + { + Name: "spring.cloud.azure.eventhubs.credential.managed-identity-enabled", + Value: "true", + }, + { + Name: "spring.cloud.azure.eventhubs.credential.client-id", + Value: scaffold.PlaceHolderForServiceIdentityClientId(), + }, + } + case internal.AuthTypeConnectionString: + commonInformation = []scaffold.Env{ + { + Name: "spring.cloud.stream.kafka.binder.brokers", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingKafka, scaffold.ResourceInfoTypeEndpoint), + }, + { + Name: "spring.cloud.azure.eventhubs.connection-string", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingKafka, scaffold.ResourceInfoTypeConnectionString), + }, + { + Name: "spring.cloud.azure.eventhubs.credential.managed-identity-enabled", + Value: "false", + }, + { + Name: "spring.cloud.azure.eventhubs.credential.client-id", + Value: "", + }, + } + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + return mergeEnvWithDuplicationCheck(springBootVersionDecidedInformation, commonInformation) + case ResourceTypeMessagingEventHubs: + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + // Not add this: spring.cloud.azure.eventhubs.connection-string = "" + // because of this: https://github.com/Azure/azure-sdk-for-java/issues/42880 + { + Name: "spring.cloud.azure.eventhubs.credential.managed-identity-enabled", + Value: "true", + }, + { + Name: "spring.cloud.azure.eventhubs.credential.client-id", + Value: scaffold.PlaceHolderForServiceIdentityClientId(), + }, + { + Name: "spring.cloud.azure.eventhubs.namespace", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingEventHubs, scaffold.ResourceInfoTypeNamespace), + }, + }, nil + case internal.AuthTypeConnectionString: + return []scaffold.Env{ + { + Name: "spring.cloud.azure.eventhubs.namespace", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingEventHubs, scaffold.ResourceInfoTypeNamespace), + }, + { + Name: "spring.cloud.azure.eventhubs.connection-string", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeMessagingEventHubs, scaffold.ResourceInfoTypeConnectionString), + }, + { + Name: "spring.cloud.azure.eventhubs.credential.managed-identity-enabled", + Value: "false", + }, + { + Name: "spring.cloud.azure.eventhubs.credential.client-id", + Value: "", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } + case ResourceTypeStorage: + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.account-name", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeStorage, scaffold.ResourceInfoTypeAccountName), + }, + { + Name: 
"spring.cloud.azure.eventhubs.processor.checkpoint-store.credential.managed-identity-enabled", + Value: "true", + }, + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.credential.client-id", + Value: scaffold.PlaceHolderForServiceIdentityClientId(), + }, + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.connection-string", + Value: "", + }, + }, nil + case internal.AuthTypeConnectionString: + return []scaffold.Env{ + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.account-name", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeStorage, scaffold.ResourceInfoTypeAccountName), + }, + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.connection-string", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeStorage, scaffold.ResourceInfoTypeConnectionString), + }, + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.credential.managed-identity-enabled", + Value: "false", + }, + { + Name: "spring.cloud.azure.eventhubs.processor.checkpoint-store.credential.client-id", + Value: "", + }, + }, nil + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } + case ResourceTypeOpenAiModel: + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{ + { + Name: "AZURE_OPENAI_ENDPOINT", + Value: scaffold.ToResourceConnectionEnv(scaffold.ResourceTypeOpenAiModel, scaffold.ResourceInfoTypeEndpoint), + }, + }, nil + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } + case ResourceTypeHostContainerApp: // todo improve this and delete Frontend and Backend in scaffold.ServiceSpec + switch authType { + case internal.AuthTypeUserAssignedManagedIdentity: + return []scaffold.Env{}, nil + default: + return []scaffold.Env{}, unsupportedAuthTypeError(resourceType, authType) + } + default: + return []scaffold.Env{}, unsupportedResourceTypeError(resourceType) + } +} + +func unsupportedResourceTypeError(resourceType ResourceType) error { + return fmt.Errorf("unsupported resource type, resourceType = %s", resourceType) +} + +func unsupportedAuthTypeError(resourceType ResourceType, authType internal.AuthType) error { + return fmt.Errorf("unsupported auth type, resourceType = %s, authType = %s", resourceType, authType) +} + +func mergeEnvWithDuplicationCheck(a []scaffold.Env, + b []scaffold.Env) ([]scaffold.Env, error) { + ab := append(a, b...) + var result []scaffold.Env + seenName := make(map[string]scaffold.Env) + for _, value := range ab { + if existingValue, exist := seenName[value.Name]; exist { + if value != existingValue { + return []scaffold.Env{}, duplicatedEnvError(existingValue, value) + } + } else { + seenName[value.Name] = value + result = append(result, value) + } + } + return result, nil +} + +func addNewEnvironmentVariable(serviceSpec *scaffold.ServiceSpec, name string, value string) error { + merged, err := mergeEnvWithDuplicationCheck(serviceSpec.Envs, + []scaffold.Env{ + { + Name: name, + Value: value, + }, + }, + ) + if err != nil { + return err + } + serviceSpec.Envs = merged + return nil +} + +func duplicatedEnvError(existingValue scaffold.Env, newValue scaffold.Env) error { + return fmt.Errorf("duplicated environment variable. 
existingValue = %s, newValue = %s", + existingValue, newValue) +} diff --git a/cli/azd/pkg/project/scaffold_gen_environment_variables_test.go b/cli/azd/pkg/project/scaffold_gen_environment_variables_test.go new file mode 100644 index 00000000000..6bd79a94c44 --- /dev/null +++ b/cli/azd/pkg/project/scaffold_gen_environment_variables_test.go @@ -0,0 +1,92 @@ +package project + +import ( + "fmt" + "github.com/azure/azure-dev/cli/azd/internal/scaffold" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestMergeEnvWithDuplicationCheck(t *testing.T) { + var empty []scaffold.Env + name1Value1 := []scaffold.Env{ + { + Name: "name1", + Value: "value1", + }, + } + name1Value2 := []scaffold.Env{ + { + Name: "name1", + Value: "value2", + }, + } + name2Value2 := []scaffold.Env{ + { + Name: "name2", + Value: "value2", + }, + } + name1Value1Name2Value2 := []scaffold.Env{ + { + Name: "name1", + Value: "value1", + }, + { + Name: "name2", + Value: "value2", + }, + } + + tests := []struct { + name string + a []scaffold.Env + b []scaffold.Env + wantEnv []scaffold.Env + wantError error + }{ + { + name: "2 empty array", + a: empty, + b: empty, + wantEnv: empty, + wantError: nil, + }, + { + name: "one is empty, another is not", + a: empty, + b: name1Value1, + wantEnv: name1Value1, + wantError: nil, + }, + { + name: "no duplication", + a: name1Value1, + b: name2Value2, + wantEnv: name1Value1Name2Value2, + wantError: nil, + }, + { + name: "duplicated name but same value", + a: name1Value1, + b: name1Value1, + wantEnv: name1Value1, + wantError: nil, + }, + { + name: "duplicated name, different value", + a: name1Value1, + b: name1Value2, + wantEnv: []scaffold.Env{}, + wantError: fmt.Errorf("duplicated environment variable. existingValue = %s, newValue = %s", + name1Value1[0], name1Value2[0]), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + env, err := mergeEnvWithDuplicationCheck(tt.a, tt.b) + assert.Equal(t, tt.wantEnv, env) + assert.Equal(t, tt.wantError, err) + }) + } +} diff --git a/cli/azd/pkg/project/scaffold_gen_test.go b/cli/azd/pkg/project/scaffold_gen_test.go index 85cf4125075..a3c11a38119 100644 --- a/cli/azd/pkg/project/scaffold_gen_test.go +++ b/cli/azd/pkg/project/scaffold_gen_test.go @@ -18,23 +18,23 @@ func Test_genBicepParamsFromEnvSubst(t *testing.T) { want string wantParams []scaffold.Parameter }{ - {"foo", false, "'foo'", nil}, - {"${MY_VAR}", false, "myVar", []scaffold.Parameter{{Name: "myVar", Value: "${MY_VAR}", Type: "string"}}}, + {"foo", false, "foo", nil}, + {"${MY_VAR}", false, "${myVar}", []scaffold.Parameter{{Name: "myVar", Value: "${MY_VAR}", Type: "string"}}}, - {"${MY_SECRET}", true, "mySecret", + {"${MY_SECRET}", true, "${mySecret}", []scaffold.Parameter{ {Name: "mySecret", Value: "${MY_SECRET}", Type: "string", Secret: true}}}, - {"Hello, ${world:=okay}!", false, "world", + {"Hello, ${world:=okay}!", false, "${world}", []scaffold.Parameter{ {Name: "world", Value: "${world:=okay}", Type: "string"}}}, - {"${CAT} and ${DOG}", false, "'${cat} and ${dog}'", + {"${CAT} and ${DOG}", false, "${cat} and ${dog}", []scaffold.Parameter{ {Name: "cat", Value: "${CAT}", Type: "string"}, {Name: "dog", Value: "${DOG}", Type: "string"}}}, - {"${DB_HOST:='local'}:${DB_USERNAME:='okay'}", true, "'${dbHost}:${dbUsername}'", + {"${DB_HOST:='local'}:${DB_USERNAME:='okay'}", true, "${dbHost}:${dbUsername}", []scaffold.Parameter{ {Name: "dbHost", Value: "${DB_HOST:='local'}", Type: "string", Secret: true}, {Name: "dbUsername", Value: "${DB_USERNAME:='okay'}", Type: 
"string", Secret: true}}}, diff --git a/cli/azd/resources/scaffold/base/abbreviations.json b/cli/azd/resources/scaffold/base/abbreviations.json index dc62141f9da..4d4a4c62d6c 100644 --- a/cli/azd/resources/scaffold/base/abbreviations.json +++ b/cli/azd/resources/scaffold/base/abbreviations.json @@ -33,6 +33,7 @@ "dataMigrationServices": "dms-", "dBforMySQLServers": "mysql-", "dBforPostgreSQLServers": "psql-", + "deploymentScript": "dc-", "devicesIotHubs": "iot-", "devicesProvisioningServices": "provs-", "devicesProvisioningServicesCertificates": "pcert-", diff --git a/cli/azd/resources/scaffold/base/modules/set-event-hubs-namespace-connection-string.bicep b/cli/azd/resources/scaffold/base/modules/set-event-hubs-namespace-connection-string.bicep new file mode 100644 index 00000000000..64245640096 --- /dev/null +++ b/cli/azd/resources/scaffold/base/modules/set-event-hubs-namespace-connection-string.bicep @@ -0,0 +1,21 @@ +param eventHubsNamespaceName string +param connectionStringSecretName string +param keyVaultName string + +resource eventHubsNamespace 'Microsoft.EventHub/namespaces@2024-01-01' existing = { + name: eventHubsNamespaceName +} + +resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { + name: keyVaultName +} + +resource connectionStringSecret 'Microsoft.KeyVault/vaults/secrets@2022-07-01' = { + name: connectionStringSecretName + parent: keyVault + properties: { + value: listKeys(concat(resourceId('Microsoft.EventHub/namespaces', eventHubsNamespaceName), '/AuthorizationRules/RootManageSharedAccessKey'), eventHubsNamespace.apiVersion).primaryConnectionString + } +} + +output keyVaultUrl string = 'https://${keyVaultName}${environment().suffixes.keyvaultDns}/secrets/${connectionStringSecretName}' diff --git a/cli/azd/resources/scaffold/base/modules/set-redis-conn.bicep b/cli/azd/resources/scaffold/base/modules/set-redis-conn.bicep index 813f96fbcbf..fbe41132a20 100644 --- a/cli/azd/resources/scaffold/base/modules/set-redis-conn.bicep +++ b/cli/azd/resources/scaffold/base/modules/set-redis-conn.bicep @@ -27,3 +27,6 @@ resource urlSecret 'Microsoft.KeyVault/vaults/secrets@2022-07-01' = { } } +output keyVaultUrlForPass string = 'https://${keyVaultName}${environment().suffixes.keyvaultDns}/secrets/${passwordSecretName}' +output keyVaultUrlForUrl string = 'https://${keyVaultName}${environment().suffixes.keyvaultDns}/secrets/${urlSecretName}' + diff --git a/cli/azd/resources/scaffold/base/modules/set-servicebus-namespace-connection-string.bicep b/cli/azd/resources/scaffold/base/modules/set-servicebus-namespace-connection-string.bicep new file mode 100644 index 00000000000..b58a707370d --- /dev/null +++ b/cli/azd/resources/scaffold/base/modules/set-servicebus-namespace-connection-string.bicep @@ -0,0 +1,21 @@ +param serviceBusNamespaceName string +param connectionStringSecretName string +param keyVaultName string + +resource serviceBusNamespace 'Microsoft.ServiceBus/namespaces@2022-10-01-preview' existing = { + name: serviceBusNamespaceName +} + +resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { + name: keyVaultName +} + +resource serviceBusConnectionStringSecret 'Microsoft.KeyVault/vaults/secrets@2022-07-01' = { + name: connectionStringSecretName + parent: keyVault + properties: { + value: listKeys(concat(resourceId('Microsoft.ServiceBus/namespaces', serviceBusNamespaceName), '/AuthorizationRules/RootManageSharedAccessKey'), serviceBusNamespace.apiVersion).primaryConnectionString + } +} + +output keyVaultUrl string = 
'https://${keyVaultName}${environment().suffixes.keyvaultDns}/secrets/${connectionStringSecretName}' diff --git a/cli/azd/resources/scaffold/base/modules/set-storage-account-connection-string.bicep b/cli/azd/resources/scaffold/base/modules/set-storage-account-connection-string.bicep new file mode 100644 index 00000000000..6e0a7da7912 --- /dev/null +++ b/cli/azd/resources/scaffold/base/modules/set-storage-account-connection-string.bicep @@ -0,0 +1,21 @@ +param storageAccountName string +param connectionStringSecretName string +param keyVaultName string + +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = { + name: storageAccountName +} + +resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { + name: keyVaultName +} + +resource connectionStringSecret 'Microsoft.KeyVault/vaults/secrets@2022-07-01' = { + name: connectionStringSecretName + parent: keyVault + properties: { + value: 'DefaultEndpointsProtocol=https;AccountName=${storageAccount.name};AccountKey=${storageAccount.listKeys().keys[0].value};EndpointSuffix=${environment().suffixes.storage}' + } +} + +output keyVaultUrl string = 'https://${keyVaultName}${environment().suffixes.keyvaultDns}/secrets/${connectionStringSecretName}' diff --git a/cli/azd/resources/scaffold/templates/main.bicept b/cli/azd/resources/scaffold/templates/main.bicept index 8ec2feb9f17..c46fa0355b1 100644 --- a/cli/azd/resources/scaffold/templates/main.bicept +++ b/cli/azd/resources/scaffold/templates/main.bicept @@ -67,4 +67,10 @@ output AZURE_RESOURCE_REDIS_ID string = resources.outputs.AZURE_RESOURCE_REDIS_I {{- if .DbPostgres}} output AZURE_RESOURCE_{{alphaSnakeUpper .DbPostgres.DatabaseName}}_ID string = resources.outputs.AZURE_RESOURCE_{{alphaSnakeUpper .DbPostgres.DatabaseName}}_ID {{- end}} +{{- if .DbMySql}} +output AZURE_MYSQL_FLEXIBLE_SERVER_ID string = resources.outputs.AZURE_MYSQL_FLEXIBLE_SERVER_ID +{{- end}} +{{- if .AzureEventHubs }} +output AZURE_EVENT_HUBS_ID string = resources.outputs.AZURE_EVENT_HUBS_ID +{{- end}} {{ end}} diff --git a/cli/azd/resources/scaffold/templates/next-steps.mdt b/cli/azd/resources/scaffold/templates/next-steps.mdt index 7fe72dec118..932f0c80f20 100644 --- a/cli/azd/resources/scaffold/templates/next-steps.mdt +++ b/cli/azd/resources/scaffold/templates/next-steps.mdt @@ -21,7 +21,7 @@ To troubleshoot any issues, see [troubleshooting](#troubleshooting). Configure environment variables for running services by updating `settings` in [main.parameters.json](./infra/main.parameters.json). {{- range .Services}} -{{- if or .DbPostgres .DbCosmosMongo .DbRedis }} +{{- if or .DbPostgres .DbMySql .DbCosmosMongo .DbRedis }} #### Database connections for `{{.Name}}` @@ -32,6 +32,9 @@ They allow connection to the database instances, and can be modified or adapted - `POSTGRES_URL` - The URL of the Azure Postgres Flexible Server database instance. Individual components are also available as: `POSTGRES_HOST`, `POSTGRES_PORT`, `POSTGRES_DATABASE`, `POSTGRES_USERNAME`, `POSTGRES_PASSWORD`. {{- end}} +{{- if .DbMySql }} +- `MYSQL_*` environment variables are configured in [{{.Name}}.bicep](./infra/app/{{.Name}}.bicep) to connect to the Mysql database. Modify these variables to match your application's needs. +{{- end}} {{- if .DbCosmosMongo }} - `MONGODB_URL` - The URL of the Azure Cosmos DB (MongoDB) instance. {{- end}} @@ -71,6 +74,9 @@ This includes: {{- if .DbPostgres}} - Azure Postgres Flexible Server to host the '{{.DbPostgres.DatabaseName}}' database. 
{{- end}} +{{- if .DbMySql}} +- [app/db-mysql.bicep](./infra/app/db-mysql.bicep) - Azure MySQL Flexible Server to host the '{{.DbMySql.DatabaseName}}' database. +{{- end}} {{- if .DbCosmosMongo}} - Azure Cosmos DB (MongoDB) to host the '{{.DbCosmosMongo.DatabaseName}}' database. {{- end}} diff --git a/cli/azd/resources/scaffold/templates/resources.bicept b/cli/azd/resources/scaffold/templates/resources.bicept index 26180abdc28..d011af5fa22 100644 --- a/cli/azd/resources/scaffold/templates/resources.bicept +++ b/cli/azd/resources/scaffold/templates/resources.bicept @@ -61,11 +61,21 @@ module containerAppsEnvironment 'br/public:avm/res/app/managed-environment:0.4.5 name: '${abbrs.appManagedEnvironments}${resourceToken}' location: location zoneRedundant: false + {{- if (or (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")))}} + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' + } + ] + {{- end}} } } {{- end}} {{- if .DbCosmosMongo}} +var mongoDatabaseName = '{{ .DbCosmosMongo.DatabaseName }}' module cosmos 'br/public:avm/res/document-db/database-account:0.8.1' = { name: 'cosmos' params: { @@ -84,13 +94,11 @@ module cosmos 'br/public:avm/res/document-db/database-account:0.8.1' = { virtualNetworkRules: [] publicNetworkAccess: 'Enabled' } - {{- if .DbCosmosMongo.DatabaseName}} mongodbDatabases: [ { - name: '{{ .DbCosmosMongo.DatabaseName }}' + name: mongoDatabaseName } ] - {{- end}} secretsExportConfiguration: { keyVaultResourceId: keyVault.outputs.resourceId primaryWriteConnectionStringSecretName: 'MONGODB-URL' @@ -99,10 +107,62 @@ module cosmos 'br/public:avm/res/document-db/database-account:0.8.1' = { } } {{- end}} - +{{- if .DbCosmos }} +var cosmosDatabaseName = '{{ .DbCosmos.DatabaseName }}' +module cosmos 'br/public:avm/res/document-db/database-account:0.8.1' = { + name: 'cosmos' + params: { + name: '${abbrs.documentDBDatabaseAccounts}${resourceToken}' + tags: tags + location: location + locations: [ + { + failoverPriority: 0 + isZoneRedundant: false + locationName: location + } + ] + networkRestrictions: { + ipRules: [] + virtualNetworkRules: [] + publicNetworkAccess: 'Enabled' + } + sqlDatabases: [ + { + name: '{{ .DbCosmos.DatabaseName }}' + containers: [ + {{- range .DbCosmos.Containers}} + { + name: '{{ .ContainerName }}' + paths: [ + {{- range $path := .PartitionKeyPaths}} + '{{ $path }}' + {{- end}} + ] + } + {{- end}} + ] + } + ] + sqlRoleAssignmentsPrincipalIds: [ + {{- range .Services}} + {{- if .DbCosmos }} + {{bicepName .Name}}Identity.outputs.principalId + {{- end}} + {{- end}} + ] + sqlRoleDefinitions: [ + { + name: 'service-access-cosmos-sql-role' + } + ] + } +} +{{- end}} {{- if .DbPostgres}} -var databaseName = '{{ .DbPostgres.DatabaseName }}' -var databaseUser = 'psqladmin' + +var postgreSqlDatabaseName = '{{ .DbPostgres.DatabaseName }}' +var postgreSqlDatabaseUser = '{{ .DbPostgres.DatabaseUser }}' module postgreServer 'br/public:avm/res/db-for-postgre-sql/flexible-server:0.1.4' = { name: 'postgreServer' params: { @@ -111,8 +171,8 @@ module postgreServer 'br/public:avm/res/db-for-postgre-sql/flexible-server:0.1.4 skuName: 'Standard_B1ms' tier: 'Burstable' // Non-required parameters - administratorLogin: databaseUser - administratorLoginPassword: databasePassword + administratorLogin: postgreSqlDatabaseUser + administratorLoginPassword: 
postgreSqlDatabasePassword geoRedundantBackup: 'Disabled' passwordAuth:'Enabled' firewallRules: [ @@ -124,13 +184,268 @@ module postgreServer 'br/public:avm/res/db-for-postgre-sql/flexible-server:0.1.4 ] databases: [ { - name: databaseName + name: postgreSqlDatabaseName + } + ] + location: location + {{- if (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' } ] + {{- end}} + } +} +{{- end}} +{{- if .DbMySql}} + +var mysqlDatabaseName = '{{ .DbMySql.DatabaseName }}' +var mysqlDatabaseUser = '{{ .DbMySql.DatabaseUser }}' +{{- if (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} +module mysqlIdentity 'br/public:avm/res/managed-identity/user-assigned-identity:0.2.1' = { + name: 'mysqlIdentity' + params: { + name: '${abbrs.managedIdentityUserAssignedIdentities}mysql-${resourceToken}' location: location + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' + } + ] } } {{- end}} +module mysqlServer 'br/public:avm/res/db-for-my-sql/flexible-server:0.4.1' = { + name: 'mysqlServer' + params: { + // Required parameters + name: '${abbrs.dBforMySQLServers}${resourceToken}' + skuName: 'Standard_B1ms' + tier: 'Burstable' + // Non-required parameters + administratorLogin: mysqlDatabaseUser + administratorLoginPassword: mysqlDatabasePassword + geoRedundantBackup: 'Disabled' + firewallRules: [ + { + name: 'AllowAllIps' + startIpAddress: '0.0.0.0' + endIpAddress: '255.255.255.255' + } + ] + databases: [ + { + name: mysqlDatabaseName + } + ] + location: location + highAvailability: 'Disabled' + {{- if (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} + managedIdentities: { + userAssignedResourceIds: [ + mysqlIdentity.outputs.resourceId + ] + } + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' + } + ] + {{- end}} + } +} +{{- end}} +{{- if (or (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")))}} + +module connectionCreatorIdentity 'br/public:avm/res/managed-identity/user-assigned-identity:0.2.1' = { + name: 'connectionCreatorIdentity' + params: { + name: '${abbrs.managedIdentityUserAssignedIdentities}cci-${resourceToken}' + location: location + } +} +{{- end}} +{{- if (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} +{{- range .Services}} +module {{bicepName .Name}}CreateConnectionToPostgreSql 'br/public:avm/res/resources/deployment-script:0.4.0' = { + name: '{{bicepName .Name}}CreateConnectionToPostgreSql' + params: { + kind: 'AzureCLI' + name: '${abbrs.deploymentScript}{{bicepName .Name}}-connection-to-pg-${resourceToken}' + azCliVersion: '2.63.0' + location: location + managedIdentities: { + userAssignedResourcesIds: [ + connectionCreatorIdentity.outputs.resourceId + ] + } + runOnce: false + retentionInterval: 'P1D' + scriptContent: 'apk update; apk add g++; apk add unixodbc-dev; az extension add --name containerapp; az extension add --name serviceconnector-passwordless --upgrade; az containerapp connection create postgres-flexible --connection 
appConnectToPostgres --source-id ${ {{bicepName .Name}}.outputs.resourceId} --target-id ${postgreServer.outputs.resourceId}/databases/${postgreSqlDatabaseName} --client-type springBoot --user-identity client-id=${ {{bicepName .Name}}Identity.outputs.clientId} subs-id=${subscription().subscriptionId} user-object-id=${connectionCreatorIdentity.outputs.principalId} -c main --yes;' + } +} +{{- end}} +{{- end}} +{{- if (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} +{{- range .Services}} +module {{bicepName .Name}}CreateConnectionToMysql 'br/public:avm/res/resources/deployment-script:0.4.0' = { + name: '{{bicepName .Name}}CreateConnectionToMysql' + params: { + kind: 'AzureCLI' + name: '${abbrs.deploymentScript}{{bicepName .Name}}-connection-to-mysql-${resourceToken}' + azCliVersion: '2.63.0' + location: location + managedIdentities: { + userAssignedResourcesIds: [ + connectionCreatorIdentity.outputs.resourceId + ] + } + runOnce: false + retentionInterval: 'P1D' + scriptContent: 'apk update; apk add g++; apk add unixodbc-dev; az extension add --name containerapp; az extension add --name serviceconnector-passwordless --upgrade; az containerapp connection create mysql-flexible --connection appConnectToMysql --source-id ${ {{bicepName .Name}}.outputs.resourceId} --target-id ${mysqlServer.outputs.resourceId}/databases/${mysqlDatabaseName} --client-type springBoot --user-identity client-id=${ {{bicepName .Name}}Identity.outputs.clientId} subs-id=${subscription().subscriptionId} user-object-id=${connectionCreatorIdentity.outputs.principalId} mysql-identity-id=${mysqlIdentity.outputs.resourceId} -c main --yes;' + } +} +{{- end}} +{{- end}} +{{- if .AzureEventHubs }} + +module eventHubNamespace 'br/public:avm/res/event-hub/namespace:0.7.1' = { + name: 'eventHubNamespace' + params: { + name: '${abbrs.eventHubNamespaces}${resourceToken}' + location: location + roleAssignments: [ + {{- range .Services}} + {{- if (and .AzureEventHubs (eq .AzureEventHubs.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} + { + principalId: {{bicepName .Name}}Identity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'f526a384-b230-433a-b45c-95f59c4a2dec') + } + {{- end}} + {{- end}} + ] + {{- if (and .AzureEventHubs (eq .AzureEventHubs.AuthType "CONNECTION_STRING")) }} + disableLocalAuth: false + {{- end}} + eventhubs: [ + {{- range $eventHubName := .AzureEventHubs.EventHubNames}} + { + name: '{{ $eventHubName }}' + } + {{- end}} + ] + } +} +{{- if (and .AzureEventHubs (eq .AzureEventHubs.AuthType "CONNECTION_STRING")) }} +module eventHubsConnectionString './modules/set-event-hubs-namespace-connection-string.bicep' = { + name: 'eventHubsConnectionString' + params: { + eventHubsNamespaceName: eventHubNamespace.outputs.name + connectionStringSecretName: 'EVENT-HUBS-CONNECTION-STRING' + keyVaultName: keyVault.outputs.name + } +} +{{end}} +{{end}} +{{- if .AzureStorageAccount }} +var storageAccountName = '${abbrs.storageStorageAccounts}${resourceToken}' +module storageAccount 'br/public:avm/res/storage/storage-account:0.14.3' = { + name: 'storageAccount' + params: { + name: storageAccountName + publicNetworkAccess: 'Enabled' + blobServices: { + containers: [ + {{- range $index, $element := .AzureStorageAccount.ContainerNames}} + { + name: '{{ $element }}' + } + {{- end}} + ] + } + location: location + roleAssignments: [ + {{- range .Services}} + {{- if (and .AzureStorageAccount (eq 
.AzureStorageAccount.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} + { + principalId: {{bicepName .Name}}Identity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b') + } + {{- end}} + {{- end}} + ] + networkAcls: { + defaultAction: 'Allow' + } + tags: tags + } +} + +{{- if (and .AzureStorageAccount (eq .AzureStorageAccount.AuthType "CONNECTION_STRING")) }} +module storageAccountConnectionString './modules/set-storage-account-connection-string.bicep' = { + name: 'storageAccountConnectionString' + params: { + storageAccountName: storageAccountName + connectionStringSecretName: 'STORAGE-ACCOUNT-CONNECTION-STRING' + keyVaultName: keyVault.outputs.name + } +} +{{end}} +{{end}} + +{{- if .AzureServiceBus }} + +module serviceBusNamespace 'br/public:avm/res/service-bus/namespace:0.10.0' = { + name: 'serviceBusNamespace' + params: { + // Required parameters + name: '${abbrs.serviceBusNamespaces}${resourceToken}' + // Non-required parameters + location: location + roleAssignments: [ + {{- range .Services}} + {{- if (and .AzureServiceBus (eq .AzureServiceBus.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) }} + { + principalId: {{bicepName .Name}}Identity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '090c5cfd-751d-490a-894a-3ce6f1109419') + } + {{- end}} + {{- end}} + ] + {{- if (and .AzureServiceBus (eq .AzureServiceBus.AuthType "CONNECTION_STRING")) }} + disableLocalAuth: false + {{- end}} + queues: [ + {{- range $queue := .AzureServiceBus.Queues}} + { + name: '{{ $queue }}' + } + {{- end}} + ] + } +} + +{{- if (and .AzureServiceBus (eq .AzureServiceBus.AuthType "CONNECTION_STRING")) }} +module serviceBusConnectionString './modules/set-servicebus-namespace-connection-string.bicep' = { + name: 'serviceBusConnectionString' + params: { + serviceBusNamespaceName: serviceBusNamespace.outputs.name + connectionStringSecretName: 'SERVICEBUS-CONNECTION-STRING' + keyVaultName: keyVault.outputs.name + } +} +{{end}} +{{end}} {{- if .AIModels}} var accountName = '${abbrs.cognitiveServicesAccounts}${resourceToken}' @@ -172,13 +487,22 @@ resource localUserOpenAIIdentity 'Microsoft.Authorization/roleAssignments@2022-0 } {{- end}} -{{- range .Services}} +{{- range $service := .Services}} module {{bicepName .Name}}Identity 'br/public:avm/res/managed-identity/user-assigned-identity:0.2.1' = { name: '{{bicepName .Name}}identity' params: { name: '${abbrs.managedIdentityUserAssignedIdentities}{{bicepName .Name}}-${resourceToken}' location: location + {{- if (or (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")))}} + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' + } + ] + {{- end}} } } @@ -197,7 +521,7 @@ module {{bicepName .Name}}FetchLatestImage './modules/fetch-container-image.bice name: '{{bicepName .Name}}-fetch-image' params: { exists: {{bicepName .Name}}Exists - name: '{{.Name}}' + name: '{{containerAppName .Name}}' } } @@ -215,7 +539,7 @@ var {{bicepName .Name}}Env = map(filter({{bicepName .Name}}AppSettingsArray, i = module {{bicepName .Name}} 'br/public:avm/res/app/container-app:0.8.0' = { name: '{{bicepName .Name}}' params: { - name: '{{.Name}}' + 
name: '{{containerAppName .Name}}' {{- if ne .Port 0}} ingressTargetPort: {{.Port}} {{- end}} @@ -235,34 +559,22 @@ module {{bicepName .Name}} 'br/public:avm/res/app/container-app:0.8.0' = { scaleMaxReplicas: 10 secrets: { secureList: union([ - {{- if .DbCosmosMongo}} + {{- range $env := .Envs}} + {{- if (shouldAddToBicepFile $service $env.Name) }} + {{- if (eq (toBicepEnv $env).BicepEnvType "keyVaultSecret") }} { - name: 'mongodb-url' - identity:{{bicepName .Name}}Identity.outputs.resourceId - keyVaultUrl: cosmos.outputs.exportedSecrets['MONGODB-URL'].secretUri + name: '{{ (toBicepEnv $env).SecretName }}' + identity:{{bicepName $service.Name}}Identity.outputs.resourceId + keyVaultUrl: {{ (toBicepEnv $env).SecretValue }} } {{- end}} - {{- if .DbPostgres}} - { - name: 'db-pass' - value: databasePassword - } + {{- if (eq (toBicepEnv $env).BicepEnvType "secret") }} { - name: 'db-url' - value: 'postgresql://${databaseUser}:${databasePassword}@${postgreServer.outputs.fqdn}:5432/${databaseName}' + name: '{{ (toBicepEnv $env).SecretName }}' + value: {{ (toBicepEnv $env).SecretValue }} } {{- end}} - {{- if .DbRedis}} - { - name: 'redis-pass' - identity:{{bicepName .Name}}Identity.outputs.resourceId - keyVaultUrl: '${keyVault.outputs.uri}secrets/REDIS-PASSWORD' - } - { - name: 'redis-url' - identity:{{bicepName .Name}}Identity.outputs.resourceId - keyVaultUrl: '${keyVault.outputs.uri}secrets/REDIS-URL' - } + {{- end}} {{- end}} ], map({{bicepName .Name}}Secrets, secret => { @@ -279,76 +591,44 @@ module {{bicepName .Name}} 'br/public:avm/res/app/container-app:0.8.0' = { memory: '1.0Gi' } env: union([ + {{- range $env := .Envs }} + {{- if (shouldAddToBicepFile $service $env.Name) }} + {{- if (or (eq (toBicepEnv $env).BicepEnvType "keyVaultSecret") (eq (toBicepEnv $env).BicepEnvType "secret")) }} { - name: 'APPLICATIONINSIGHTS_CONNECTION_STRING' - value: monitoring.outputs.applicationInsightsConnectionString - } - { - name: 'AZURE_CLIENT_ID' - value: {{bicepName .Name}}Identity.outputs.clientId - } - {{- if .DbCosmosMongo}} - { - name: 'MONGODB_URL' - secretRef: 'mongodb-url' + name: '{{ (toBicepEnv $env).Name }}' + secretRef: '{{ (toBicepEnv $env).SecretName }}' } {{- end}} - {{- if .DbPostgres}} - { - name: 'POSTGRES_HOST' - value: postgreServer.outputs.fqdn - } - { - name: 'POSTGRES_USERNAME' - value: databaseUser - } - { - name: 'POSTGRES_DATABASE' - value: databaseName - } - { - name: 'POSTGRES_PASSWORD' - secretRef: 'db-pass' - } - { - name: 'POSTGRES_URL' - secretRef: 'db-url' - } + {{- if (eq (toBicepEnv $env).BicepEnvType "plainText") }} { - name: 'POSTGRES_PORT' - value: '5432' + name: '{{ (toBicepEnv $env).Name }}' + {{- if (eq (toBicepEnv $env).PlainTextValue "'__PlaceHolderForServiceIdentityClientId'")}} + value: {{bicepName $service.Name}}Identity.outputs.clientId + {{- else}} + value: {{ (toBicepEnv $env).PlainTextValue }} + {{- end}} } {{- end}} - {{- if .DbRedis}} - { - name: 'REDIS_HOST' - value: redis.outputs.hostName - } - { - name: 'REDIS_PORT' - value: string(redis.outputs.sslPort) - } + {{- end}} + {{- end}} { - name: 'REDIS_URL' - secretRef: 'redis-url' + name: 'APPLICATIONINSIGHTS_CONNECTION_STRING' + value: monitoring.outputs.applicationInsightsConnectionString } { - name: 'REDIS_ENDPOINT' - value: '${redis.outputs.hostName}:${string(redis.outputs.sslPort)}' + name: 'AZURE_CLIENT_ID' + value: {{bicepName .Name}}Identity.outputs.clientId } + {{- if .Frontend}} + {{- range $i, $e := .Frontend.Backends}} { - name: 'REDIS_PASSWORD' - secretRef: 'redis-pass' + name: '{{upper 
.Name}}_BASE_URL' + value: 'https://{{.Name}}.${containerAppsEnvironment.outputs.defaultDomain}' } {{- end}} - {{- if .AIModels}} - { - name: 'AZURE_OPENAI_ENDPOINT' - value: account.outputs.endpoint - } {{- end}} - {{- if .Frontend}} - {{- range $i, $e := .Frontend.Backends}} + {{- if .Backend}} + {{- range $i, $e := .Backend.Frontends}} { name: '{{upper .Name}}_BASE_URL' value: 'https://{{.Name}}.${containerAppsEnvironment.outputs.defaultDomain}' @@ -382,6 +662,15 @@ module {{bicepName .Name}} 'br/public:avm/res/app/container-app:0.8.0' = { environmentResourceId: containerAppsEnvironment.outputs.resourceId location: location tags: union(tags, { 'azd-service-name': '{{.Name}}' }) + {{- if (or (and .DbPostgres (eq .DbPostgres.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")) (and .DbMySql (eq .DbMySql.AuthType "USER_ASSIGNED_MANAGED_IDENTITY")))}} + roleAssignments: [ + { + principalId: connectionCreatorIdentity.outputs.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: 'b24988ac-6180-42a0-ab88-20f7382dd24c' + } + ] + {{- end}} } } {{- end}} @@ -435,10 +724,16 @@ module keyVault 'br/public:avm/res/key-vault/vault:0.6.1' = { {{- end}} ] secrets: [ - {{- if .DbPostgres}} + {{- if (and .DbPostgres (eq .DbPostgres.AuthType "PASSWORD")) }} + { + name: 'postgresql-password' + value: postgreSqlDatabasePassword + } + {{- end}} + {{- if (and .DbMySql (eq .DbMySql.AuthType "PASSWORD")) }} { - name: 'db-pass' - value: databasePassword + name: 'mysql-password' + value: mysqlDatabasePassword } {{- end}} ] @@ -465,4 +760,13 @@ output AZURE_RESOURCE_REDIS_ID string = redis.outputs.resourceId {{- if .DbPostgres}} output AZURE_RESOURCE_{{alphaSnakeUpper .DbPostgres.DatabaseName}}_ID string = '${postgreServer.outputs.resourceId}/databases/{{.DbPostgres.DatabaseName}}' {{- end}} +{{- if .DbMySql}} +output AZURE_MYSQL_FLEXIBLE_SERVER_ID string = mysqlServer.outputs.resourceId +{{- end}} +{{- if .AzureEventHubs }} +output AZURE_EVENT_HUBS_ID string = eventHubNamespace.outputs.resourceId +{{- end}} +{{- if .AzureServiceBus }} +output AZURE_SERVICE_BUS_ID string = serviceBusNamespace.outputs.resourceId +{{- end}} {{ end}} diff --git a/cli/azd/test/functional/init_test.go b/cli/azd/test/functional/init_test.go index da748fa3e2b..3e4809947a5 100644 --- a/cli/azd/test/functional/init_test.go +++ b/cli/azd/test/functional/init_test.go @@ -203,6 +203,7 @@ func Test_CLI_Init_From_App_With_Infra(t *testing.T) { "Use code in the current directory\n"+ "Confirm and continue initializing my app\n"+ "appdb\n"+ + "User assigned managed identity\n"+ "TESTENV\n", "init", ) diff --git a/ext/vscode/package.json b/ext/vscode/package.json index f9f06a3f6f2..83261a28432 100644 --- a/ext/vscode/package.json +++ b/ext/vscode/package.json @@ -185,11 +185,16 @@ "explorer/context": [ { "submenu": "azure-dev.explorer.submenu", - "when": "resourceFilename =~ /azure.yaml/i", + "when": "resourceFilename =~ /(azure.yaml|pom.xml)/i", "group": "azure-dev" } ], "azure-dev.explorer.submenu": [ + { + "when": "resourceFilename =~ /pom.xml/i", + "command": "azure-dev.commands.cli.init", + "group": "10provision@10" + }, { "when": "resourceFilename =~ /azure.yaml/i", "command": "azure-dev.commands.cli.provision", diff --git a/ext/vscode/package.nls.json b/ext/vscode/package.nls.json index 3b633c1f39f..f0b358ad186 100644 --- a/ext/vscode/package.nls.json +++ b/ext/vscode/package.nls.json @@ -1,7 +1,7 @@ { "azure-dev.commands_category": "Azure Developer CLI (azd)", - "azure-dev.commands.cli.init.title": "Initialize App (init)", + 
"azure-dev.commands.cli.init.title": "Generate Azure Deployment Script (init)", "azure-dev.commands.cli.provision.title": "Provision Azure Resources (provision)", "azure-dev.commands.cli.deploy.title": "Deploy to Azure (deploy)", "azure-dev.commands.cli.restore.title": "Restore App Dependencies (restore)", diff --git a/schemas/alpha/azure.yaml.json b/schemas/alpha/azure.yaml.json index f1d3016c742..2c5cd04d067 100644 --- a/schemas/alpha/azure.yaml.json +++ b/schemas/alpha/azure.yaml.json @@ -354,6 +354,9 @@ }, "resources": { "type": "object", + "title": "Definition of resources that the application depends on", + "description": "Optional. Provides additional configuration for Azure resources that the application depends on.", + "minProperties": 1, "additionalProperties": { "type": "object", "required": [ @@ -365,9 +368,15 @@ "title": "Type of resource", "description": "The type of resource to be created. (Example: db.postgres)", "enum": [ + "db.mysql", "db.postgres", "db.redis", "db.mongo", + "db.cosmos", + "messaging.servicebus", + "messaging.eventhubs", + "messaging.kafka", + "storage", "ai.openai.model", "host.containerapp" ] @@ -384,9 +393,15 @@ "allOf": [ { "if": { "properties": { "type": { "const": "host.containerapp" }}}, "then": { "$ref": "#/definitions/containerAppResource" } }, { "if": { "properties": { "type": { "const": "ai.openai.model" }}}, "then": { "$ref": "#/definitions/aiModelResource" } }, - { "if": { "properties": { "type": { "const": "db.postgres" }}}, "then": { "$ref": "#/definitions/resource"} }, + { "if": { "properties": { "type": { "const": "db.mysql" }}}, "then": { "$ref": "#/definitions/mySqlDbResource"} }, + { "if": { "properties": { "type": { "const": "db.postgres" }}}, "then": { "$ref": "#/definitions/postgreSqlDbResource"} }, { "if": { "properties": { "type": { "const": "db.redis" }}}, "then": { "$ref": "#/definitions/resource"} }, - { "if": { "properties": { "type": { "const": "db.mongo" }}}, "then": { "$ref": "#/definitions/resource"} } + { "if": { "properties": { "type": { "const": "db.mongo" }}}, "then": { "$ref": "#/definitions/mongoDbResource"} }, + { "if": { "properties": { "type": { "const": "db.cosmos" }}}, "then": { "$ref": "#/definitions/cosmosDbResource"} }, + { "if": { "properties": { "type": { "const": "messaging.servicebus" }}}, "then": { "$ref": "#/definitions/serviceBusResource"} }, + { "if": { "properties": { "type": { "const": "messaging.eventhubs" }}}, "then": { "$ref": "#/definitions/eventHubsResource"} }, + { "if": { "properties": { "type": { "const": "messaging.kafka" }}}, "then": { "$ref": "#/definitions/kafkaResource"} }, + { "if": { "properties": { "type": { "const": "storage" }}}, "then": { "$ref": "#/definitions/storageAccountResource"} } ] } }, @@ -1205,11 +1220,10 @@ "type": { "type": "string", "title": "Type of resource", - "description": "The type of resource to be created. (Example: db.postgres)", + "description": "The type of resource to be created. 
(Example: db.redis)", "enum": [ - "db.postgres", "db.redis", - "db.mongo", + "storage", "host.containerapp", "ai.openai.model" ] @@ -1298,6 +1312,220 @@ } } } + }, + "mySqlDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Database for MySQL flexible server.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for Azure MySQL database.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "PASSWORD" + ] + }, + "databaseName": { + "type": "string", + "title": "The Azure MySQL Database Name", + "description": "The name of Azure MySQL database." + } + } + }, + "postgreSqlDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Database for PostgreSQL flexible server.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for Azure PostgreSQL database.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "PASSWORD" + ] + }, + "databaseName": { + "type": "string", + "title": "The Azure PostgreSQL Database Name", + "description": "The name of Azure PostgreSQL database." + } + } + }, + "mongoDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure CosmosDB API for MongoDB.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "databaseName": { + "type": "string", + "title": "The Azure MongoDB Name", + "description": "The name of Azure CosmosDB API for MongoDB." + } + } + }, + "storageAccountResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Storage Account.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for Azure Storage Account.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "CONNECTION_STRING" + ] + }, + "containers": { + "type": "array", + "title": "Azure Storage Account container names.", + "description": "The container names of Azure Storage Account.", + "items": { + "type": "string", + "title": "Azure Storage Account container name", + "description": "The container name of Azure Storage Account." + } + } + } + }, + "cosmosDbResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Cosmos DB for NoSQL.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "databaseName": { + "type": "string", + "title": "The Azure Cosmos DB Name", + "description": "The name of Azure Cosmos DB." + }, + "containers": { + "type": "array", + "title": "Azure Cosmos DB Containers", + "description": "A list of containers in the Azure CosmosDB.", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "containerName": { + "type": "string", + "title": "Container Name", + "description": "The name of the container." 
+ }, + "partitionKeyPaths": { + "type": "array", + "title": "Partition Key Paths", + "description": "A list of partition key paths for the container.", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "serviceBusResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Service Bus.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "queues": { + "type": "array", + "title": "Service Bus Queues", + "description": "A list of Service Bus queues.", + "items": { + "type": "string" + } + }, + "isJms": { + "type": "boolean", + "title": "Is JMS", + "description": "Indicates if JMS is enabled for the Service Bus." + }, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for the Service Bus.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "CONNECTION_STRING" + ] + } + } + }, + "eventHubsResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Event Hubs.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "eventHubNames": { + "type": "array", + "title": "Event Hub Names", + "description": "A list of Event Hub names.", + "items": { + "type": "string" + } + }, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for Event Hubs.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "CONNECTION_STRING" + ] + } + } + }, + "kafkaResource": { + "type": "object", + "description": "A deployed, ready-to-use Azure Event Hubs for Apache Kafka.", + "additionalProperties": false, + "properties": { + "type": true, + "uses": true, + "topics": { + "type": "array", + "title": "Topics", + "description": "A list of Kafka topics.", + "items": { + "type": "string" + } + }, + "authType": { + "type": "string", + "title": "Authentication Type", + "description": "The type of authentication used for Kafka.", + "enum": [ + "USER_ASSIGNED_MANAGED_IDENTITY", + "CONNECTION_STRING" + ] + }, + "springBootVersion": { + "type": "string", + "title": "Spring Boot Version", + "description": "The Spring Boot version used in the project." + } + } } } } \ No newline at end of file
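Editor's note: to make the schema additions above concrete, here is a minimal, illustrative azure.yaml `resources` fragment that the new definitions (db.mysql, db.cosmos, messaging.eventhubs, storage) are intended to validate. All resource names (`api`, `mysql`, `cosmos`, `eventhubs`, `blobs`) and values are invented for the example, and the `host.containerapp` entry with its `uses` list relies on the pre-existing container-app definition, which this diff does not change; treat this as a sketch, not authoritative documentation.

```yaml
# Hypothetical azure.yaml fragment exercising the resource types added in this schema change.
resources:
  api:
    type: host.containerapp            # existing type; wires the new dependencies via `uses`
    uses:
      - mysql
      - cosmos
      - eventhubs
      - blobs
  mysql:
    type: db.mysql
    authType: USER_ASSIGNED_MANAGED_IDENTITY   # or PASSWORD
    databaseName: appdb
  cosmos:
    type: db.cosmos
    databaseName: appdb
    containers:
      - containerName: orders
        partitionKeyPaths:
          - /id
  eventhubs:
    type: messaging.eventhubs
    authType: USER_ASSIGNED_MANAGED_IDENTITY   # or CONNECTION_STRING
    eventHubNames:
      - orders-events
  blobs:
    type: storage
    authType: USER_ASSIGNED_MANAGED_IDENTITY   # or CONNECTION_STRING
    containers:
      - uploads
```

The `messaging.servicebus` and `messaging.kafka` types follow the same shape (queues/isJms and topics/springBootVersion respectively, plus authType). Note that choosing `authType: USER_ASSIGNED_MANAGED_IDENTITY` for db.mysql or db.postgres is what activates the conditional `roleAssignments` block added to the container-app Bicep template earlier in this diff.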