From 30e1e0f559f588db923ddc068934022b0ac9987e Mon Sep 17 00:00:00 2001 From: Jonathan West Date: Mon, 23 Mar 2026 05:46:30 -0400 Subject: [PATCH] chore: sync upstream e2e tests to downstream Signed-off-by: Jonathan West --- Makefile | 2 +- .../e2e/ginkgo/fixture/application/fixture.go | 37 ++ .../e2e/ginkgo/fixture/argocd/fixture.go | 13 +- .../e2e/ginkgo/fixture/clusterrole/fixture.go | 8 + .../e2e/ginkgo/fixture/k8s/fixture.go | 22 +- ...te_applicationset_in_any_namespace_test.go | 223 ++++++++- ...51_validate_argocd_agent_principal_test.go | 146 +++++- .../1-052_validate_argocd_agent_agent_test.go | 138 +++++- ...e_argocd_agent_principal_connected_test.go | 461 +++++++++++++++++- 9 files changed, 1026 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 98f4569544f..704b914d41c 100644 --- a/Makefile +++ b/Makefile @@ -211,7 +211,7 @@ build: generate fmt vet ## Build manager binary. .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. - CLUSTER_SCOPED_ARGO_ROLLOUTS_NAMESPACES=argo-rollouts,test-rom-ns-1,rom-ns-1,openshift-gitops ARGOCD_CLUSTER_CONFIG_NAMESPACES="openshift-gitops, argocd-e2e-cluster-config, argocd-test-impersonation-1-046, argocd-agent-principal-1-051, argocd-agent-agent-1-052, appset-argocd, appset-old-ns, appset-new-ns, ns-hosting-principal, ns-hosting-managed-agent, ns-hosting-autonomous-agent" REDIS_CONFIG_PATH="build/redis" go run ./cmd/main.go + CLUSTER_SCOPED_ARGO_ROLLOUTS_NAMESPACES=argo-rollouts,test-rom-ns-1,rom-ns-1,openshift-gitops ARGOCD_CLUSTER_CONFIG_NAMESPACES="openshift-gitops, argocd-e2e-cluster-config, argocd-test-impersonation-1-046, argocd-agent-principal-1-051, argocd-agent-agent-1-052, appset-argocd, appset-old-ns, appset-new-ns, ns-hosting-principal, ns-hosting-managed-agent, ns-hosting-autonomous-agent, appset-argocd-clusterrole" REDIS_CONFIG_PATH="build/redis" go run ./cmd/main.go .PHONY: docker-build docker-build: test ## Build container image with the manager. 
diff --git a/test/openshift/e2e/ginkgo/fixture/application/fixture.go b/test/openshift/e2e/ginkgo/fixture/application/fixture.go index b44d9d4a9c6..6e18849f896 100644 --- a/test/openshift/e2e/ginkgo/fixture/application/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/application/fixture.go @@ -1,6 +1,9 @@ package application import ( + "fmt" + "regexp" + . "github.com/onsi/gomega" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" "k8s.io/client-go/util/retry" @@ -83,6 +86,40 @@ func HaveSyncStatusCode(expected appv1alpha1.SyncStatusCode) matcher.GomegaMatch } +func HaveNoConditions() matcher.GomegaMatcher { + return expectedCondition(func(app *appv1alpha1.Application) bool { + count := len(app.Status.Conditions) + if count == 0 { + return true + } + + GinkgoWriter.Printf("HaveNoConditions - have: %+v\n", app.Status.Conditions) + return false + }) +} + +func HaveConditionMatching(conditionType appv1alpha1.ApplicationConditionType, messagePattern string) matcher.GomegaMatcher { + pattern := regexp.MustCompile(messagePattern) + + return expectedCondition(func(app *appv1alpha1.Application) bool { + conditions := app.Status.Conditions + var found []string + for _, condition := range conditions { + found = append(found, fmt.Sprintf(" - %s/%s", condition.Type, condition.Message)) + + if condition.Type == conditionType && pattern.MatchString(condition.Message) { + return true + } + } + + GinkgoWriter.Printf("HaveConditionMatching - expected: %s/%s; current(%d):\n", conditionType, messagePattern, len(conditions)) + for _, f := range found { + GinkgoWriter.Println(f) + } + return false + }) +} + // Update will keep trying to update object until it succeeds, or times out. 
func Update(obj *appv1alpha1.Application, modify func(*appv1alpha1.Application)) { k8sClient, _ := utils.GetE2ETestKubeClient() diff --git a/test/openshift/e2e/ginkgo/fixture/argocd/fixture.go b/test/openshift/e2e/ginkgo/fixture/argocd/fixture.go index 239be3a33ca..cc0318969d6 100644 --- a/test/openshift/e2e/ginkgo/fixture/argocd/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/argocd/fixture.go @@ -191,32 +191,33 @@ func HaveExternalAuthenticationCondition(expected metav1.Condition) matcher.Gome func HaveCondition(condition metav1.Condition) matcher.GomegaMatcher { return fetchArgoCD(func(argocd *argov1beta1api.ArgoCD) bool { - if len(argocd.Status.Conditions) != 1 { - GinkgoWriter.Println("HaveCondition: length is zero") + length := len(argocd.Status.Conditions) + if length != 1 { + GinkgoWriter.Printf("HaveCondition: length is %d\n", length) return false } instanceCondition := argocd.Status.Conditions[0] - GinkgoWriter.Println("HaveCondition - Message:", instanceCondition.Message, condition.Message) + GinkgoWriter.Printf("HaveCondition - Message: '%s' / actual: '%s'\n", condition.Message, instanceCondition.Message) if instanceCondition.Message != condition.Message { GinkgoWriter.Println("HaveCondition: message does not match") return false } - GinkgoWriter.Println("HaveCondition - Reason:", instanceCondition.Reason, condition.Reason) + GinkgoWriter.Printf("HaveCondition - Reason: '%s' / actual: '%s'\n", condition.Reason, instanceCondition.Reason) if instanceCondition.Reason != condition.Reason { GinkgoWriter.Println("HaveCondition: reason does not match") return false } - GinkgoWriter.Println("HaveCondition - Status:", instanceCondition.Status, condition.Status) + GinkgoWriter.Printf("HaveCondition - Status: '%s' / actual: '%s'\n", condition.Status, instanceCondition.Status) if instanceCondition.Status != condition.Status { GinkgoWriter.Println("HaveCondition: status does not match") return false } - GinkgoWriter.Println("HaveCondition - Type:", 
instanceCondition.Type, condition.Type) + GinkgoWriter.Printf("HaveCondition - Type: '%s' / actual: '%s'\n", condition.Type, instanceCondition.Type) if instanceCondition.Type != condition.Type { GinkgoWriter.Println("HaveCondition: type does not match") return false diff --git a/test/openshift/e2e/ginkgo/fixture/clusterrole/fixture.go b/test/openshift/e2e/ginkgo/fixture/clusterrole/fixture.go index 7c3ea8700e9..83f2048b28c 100644 --- a/test/openshift/e2e/ginkgo/fixture/clusterrole/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/clusterrole/fixture.go @@ -2,6 +2,7 @@ package clusterrole import ( "context" + "reflect" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -32,6 +33,13 @@ func Update(obj *rbacv1.ClusterRole, modify func(*rbacv1.ClusterRole)) { } +func HaveRules(expectedRules []rbacv1.PolicyRule) matcher.GomegaMatcher { + return fetchRole(func(cr *rbacv1.ClusterRole) bool { + GinkgoWriter.Println("HaveRules - Expected:", expectedRules, "/ Actual:", cr.Rules) + return reflect.DeepEqual(expectedRules, cr.Rules) + }) +} + // This is intentionally NOT exported, for now. Create another function in this file/package that calls this function, and export that. // //nolint:unused diff --git a/test/openshift/e2e/ginkgo/fixture/k8s/fixture.go b/test/openshift/e2e/ginkgo/fixture/k8s/fixture.go index ec517799620..7f1f3486e49 100644 --- a/test/openshift/e2e/ginkgo/fixture/k8s/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/k8s/fixture.go @@ -98,15 +98,23 @@ func NotHaveLabelWithValue(key string, value string) matcher.GomegaMatcher { // ExistByName checks if the given k8s resource exists, when retrieving it by name/namespace. // - It does NOT check if the resource content matches. It only checks that a resource of that type and name exists. 
func ExistByName() matcher.GomegaMatcher { + return ExistByNameWithClient(nil) +} - return WithTransform(func(k8sObject client.Object) bool { - k8sClient, _, err := utils.GetE2ETestKubeClientWithError() - if err != nil { - GinkgoWriter.Println(err) - return false - } +// ExistByNameWithClient checks if the given k8s resource exists, when retrieving it by name/namespace. +// - It does NOT check if the resource content matches. It only checks that a resource of that type and name exists. +// +// NOTE: you probably want to instead use ExistByName() +func ExistByNameWithClient(k8sClient client.Client) matcher.GomegaMatcher { + if k8sClient == nil { + var err error + k8sClient, _, err = utils.GetE2ETestKubeClientWithError() + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ShouldNot(BeNil()) + } - err = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(k8sObject), k8sObject) + return WithTransform(func(k8sObject client.Object) bool { + err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(k8sObject), k8sObject) if err != nil { GinkgoWriter.Println("Object does not exists in ExistByName:", k8sObject.GetName(), err) } else { diff --git a/test/openshift/e2e/ginkgo/sequential/1-037_validate_applicationset_in_any_namespace_test.go b/test/openshift/e2e/ginkgo/sequential/1-037_validate_applicationset_in_any_namespace_test.go index 3847fbec388..565f6f14b3a 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-037_validate_applicationset_in_any_namespace_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-037_validate_applicationset_in_any_namespace_test.go @@ -7,20 +7,26 @@ import ( "github.com/argoproj-labs/argocd-operator/api/v1beta1" "github.com/argoproj-labs/argocd-operator/common" + "github.com/argoproj/gitops-engine/pkg/health" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" + applicationFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/application" + appprojectFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/appproject" argocdFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/argocd" + clusterroleFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/clusterrole" deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" k8sFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" namespaceFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/namespace" roleFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/role" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" + appv1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,7 +51,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { AfterEach(func() { fixture.OutputDebugOnFail("appset-argocd", "appset-old-ns", "appset-new-ns", "appset-namespace-scoped", "target-ns-1-037", - "team-1", "team-2", "team-frontend", "team-backend", "team-3", "other-ns") + "team-1", "team-2", "team-frontend", "team-backend", "team-3", "other-ns", "appset-argocd-clusterrole", "appset-target-ns") // Clean up namespaces created for _, namespaceCleanupFunction := range cleanupFunctions { @@ -973,5 +979,220 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { }) + It("verifies ApplicationSet clusterrole 
rules and creates appset/app in another namespace", func() { + + fixture.SetEnvInOperatorSubscriptionOrDeployment("ARGOCD_CLUSTER_CONFIG_NAMESPACES", "openshift-gitops, argocd-e2e-cluster-config, appset-argocd-clusterrole,appset-target-ns") + + By("creating Argo CD namespace and target source namespace") + argoNamespace, cleanupFunc := fixture.CreateNamespaceWithCleanupFunc("appset-argocd-clusterrole") + cleanupFunctions = append(cleanupFunctions, cleanupFunc) + + targetNS, cleanupFunc := fixture.CreateNamespaceWithCleanupFunc("appset-target-ns") + cleanupFunctions = append(cleanupFunctions, cleanupFunc) + + By("creating Argo CD instance with source namespaces") + argoCD := &v1beta1.ArgoCD{ + ObjectMeta: metav1.ObjectMeta{ + Name: "appset-example", + Namespace: argoNamespace.Name, + }, + Spec: v1beta1.ArgoCDSpec{ + SourceNamespaces: []string{ + targetNS.Name, + }, + ApplicationSet: &v1beta1.ArgoCDApplicationSet{ + SourceNamespaces: []string{ + targetNS.Name, + }, + SCMProviders: []string{ + "github.com", + }, + }, + }, + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + Eventually(argoCD, "5m", "5s").Should(argocdFixture.BeAvailable()) + Eventually(argoCD).Should(argocdFixture.HaveApplicationSetControllerStatus("Running")) + + By("2) verifying that the appset deployment contains matching namespace in the command") + appsetDeployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "appset-example-applicationset-controller", + Namespace: argoCD.Namespace, + }, + } + Eventually(appsetDeployment).Should(k8sFixture.ExistByName()) + + // Verify that target namespace is included + Eventually(appsetDeployment).Should(deploymentFixture.HaveContainerCommandSubstring("--applicationset-namespaces", 0)) + + appProject := &appv1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: argoCD.Namespace, + }, + } + Eventually(appProject).Should(k8sFixture.ExistByName()) + appprojectFixture.Update(appProject, func(appProject 
*appv1alpha1.AppProject) { + appProject.Spec.SourceNamespaces = append(appProject.Spec.SourceNamespaces, targetNS.Name) + }) + + By("verifying ApplicationSet controller ClusterRole has expected rules") + appsetClusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: argoCD.Name + "-" + argoCD.Namespace + "-" + common.ArgoCDApplicationSetControllerComponent, + }, + } + Eventually(appsetClusterRole, "5m", "10s").Should(k8sFixture.ExistByName()) + Eventually(appsetClusterRole, "5m", "10s").Should(clusterroleFixture.HaveRules([]rbacv1.PolicyRule{ + { + APIGroups: []string{"argoproj.io"}, + Resources: []string{ + "applications", + "applicationsets", + "applicationsets/finalizers", + }, + Verbs: []string{ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch", + }, + }, + { + APIGroups: []string{"argoproj.io"}, + Resources: []string{ + "appprojects", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{"argoproj.io"}, + Resources: []string{ + "applicationsets/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "events", + }, + Verbs: []string{ + "create", + "get", + "list", + "patch", + "watch", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "secrets", + "configmaps", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{"coordination.k8s.io"}, + Resources: []string{ + "leases", + }, + Verbs: []string{ + "create", + }, + }, + { + APIGroups: []string{"coordination.k8s.io"}, + Resources: []string{ + "leases", + }, + Verbs: []string{ + "get", + "update", + "create", + }, + ResourceNames: []string{ + "58ac56fa.applicationsets.argoproj.io", + }, + }, + })) + + By("creating an ApplicationSet in the target namespace") + appset := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "ApplicationSet", + 
"metadata": map[string]interface{}{ + "name": "guestbook-appset", + "namespace": targetNS.Name, + }, + "spec": map[string]interface{}{ + "generators": []interface{}{ + map[string]interface{}{ + "list": map[string]interface{}{ + "elements": []interface{}{ + map[string]interface{}{ + "name": "guestbook", + }, + }, + }, + }, + }, + "template": map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "{{name}}", + }, + "spec": map[string]interface{}{ + "project": "default", + "source": map[string]interface{}{ + "repoURL": "https://github.com/argoproj/argocd-example-apps.git", + "targetRevision": "HEAD", + "path": "guestbook", + }, + "destination": map[string]interface{}{ + "server": "https://kubernetes.default.svc", + "namespace": targetNS.Name, + }, + }, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, appset)).To(Succeed()) + Eventually(appset).Should(k8sFixture.ExistByName()) + + By("verifying ApplicationSet generates Application in target namespace") + generatedApp := &appv1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: targetNS.Name, + }, + } + Eventually(generatedApp, "5m", "10s").Should(k8sFixture.ExistByName()) + Eventually(generatedApp, "5m", "10s").Should(applicationFixture.HaveHealthStatusCode(health.HealthStatusMissing)) + Eventually(generatedApp, "5m", "10s").Should(applicationFixture.HaveSyncStatusCode(appv1alpha1.SyncStatusCodeOutOfSync)) + By("Cleaning up the ApplicationSet") + Expect(k8sClient.Delete(ctx, appset)).To(Succeed()) + Eventually(appset).Should(k8sFixture.NotExistByName()) + }) + }) }) diff --git a/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go index 8477d8cd21d..703ca1fd006 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go @@ -198,6 
+198,13 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { }, } + principalNetworkPolicy = &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-agent-principal-network-policy", argoCDName), + Namespace: ns.Name, + }, + } + // List environment variables with expected values for the principal deployment expectedEnvVariables = map[string]string{ argocdagent.EnvArgoCDPrincipalLogLevel: "info", @@ -405,7 +412,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Create ArgoCD instance") - argoCD.Spec.ArgoCDAgent.Principal.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.0" + argoCD.Spec.ArgoCDAgent.Principal.Image = common.ArgoCDAgentPrincipalDefaultImageName Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) By("Verify expected resources are created for principal pod") @@ -416,7 +423,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentPrincipalName, *principalDeployment) Expect(container).ToNot(BeNil()) - Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) + Expect(container.Image).To(Equal(common.ArgoCDAgentPrincipalDefaultImageName)) By("Verify environment variables are set correctly") @@ -710,6 +717,141 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { }, "30s", "2s").Should(Equal(corev1.ServiceTypeLoadBalancer)) }) + It("should deploy principal via namespace-scoped ArgoCD instance and verify cluster role and cluster role binding are not created", func() { + + By("Create namespace-scoped ArgoCD instance") + + // Create namespace for hosting namespace-scoped ArgoCD instance with principal + nsScoped, cleanupFuncScoped := fixture.CreateNamespaceWithCleanupFunc("argocd-agent-principal-ns-scoped-1-051") + defer cleanupFuncScoped() + + // Update namespace in ArgoCD CR + argoCD.Namespace = nsScoped.Name + + // Update namespace in resource references + 
serviceAccount.Namespace = nsScoped.Name + role.Namespace = nsScoped.Name + roleBinding.Namespace = nsScoped.Name + principalDeployment.Namespace = nsScoped.Name + principalRoute.Namespace = nsScoped.Name + clusterRole.Name = fmt.Sprintf("%s-%s-agent-principal", argoCDName, nsScoped.Name) + clusterRoleBinding.Name = fmt.Sprintf("%s-%s-agent-principal", argoCDName, nsScoped.Name) + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify namespace-scoped resources are created for principal") + + Eventually(serviceAccount, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(role, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(roleBinding, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(principalDeployment, "30s", "2s").Should(k8sFixture.ExistByName()) + for _, serviceName := range serviceNames { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: nsScoped.Name, + }, + } + Eventually(service, "30s", "2s").Should(k8sFixture.ExistByName(), + "Service '%s' should exist in namespace '%s'", serviceName, nsScoped.Name) + } + + By("Verify ClusterRole and ClusterRoleBinding are not created") + Consistently(clusterRole, "10s", "1s").Should(k8sFixture.NotExistByName(), + "ClusterRole '%s' should not exist for namespace-scoped ArgoCD instance", clusterRole.Name) + + Consistently(clusterRoleBinding, "10s", "1s").Should(k8sFixture.NotExistByName(), + "ClusterRoleBinding '%s' should not exist for namespace-scoped ArgoCD instance", clusterRoleBinding.Name) + + By("Delete ArgoCD instance") + Expect(k8sClient.Delete(ctx, argoCD)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCD.Name, Namespace: argoCD.Namespace}, argoCD) + return err != nil + }, "60s", "2s").Should(BeTrue(), "ArgoCD should be deleted") + }) + + It("should delete existing cluster role and cluster role binding if ArgoCD instance is namespace-scoped", func() { + + By("Create 
namespace-scoped ArgoCD instance namespace") + + // Create namespace for hosting namespace-scoped ArgoCD instance with principal + nsScoped, cleanupFuncScoped := fixture.CreateNamespaceWithCleanupFunc("argocd-agent-principal-ns-scoped-1-051") + defer cleanupFuncScoped() + + // Update namespace in ArgoCD CR + argoCD.Namespace = nsScoped.Name + + // Update namespace in resource references + serviceAccount.Namespace = nsScoped.Name + role.Namespace = nsScoped.Name + roleBinding.Namespace = nsScoped.Name + principalDeployment.Namespace = nsScoped.Name + principalRoute.Namespace = nsScoped.Name + clusterRole.Name = fmt.Sprintf("%s-%s-agent-principal", argoCDName, nsScoped.Name) + clusterRoleBinding.Name = fmt.Sprintf("%s-%s-agent-principal", argoCDName, nsScoped.Name) + + By("Pre-create ClusterRole and ClusterRoleBinding before ArgoCD instance") + + preExistingClusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRole.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "test", + }, + }, + } + Expect(k8sClient.Create(ctx, preExistingClusterRole)).To(Succeed()) + + preExistingClusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBinding.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "test", + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: "default", + Namespace: nsScoped.Name, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: clusterRole.Name, + }, + } + Expect(k8sClient.Create(ctx, preExistingClusterRoleBinding)).To(Succeed()) + + By("Verify pre-existing ClusterRole and ClusterRoleBinding exist") + + Eventually(clusterRole, "30s", "1s").Should(k8sFixture.ExistByName(), + "Pre-existing ClusterRole '%s' should exist before ArgoCD instance creation", clusterRole.Name) + Eventually(clusterRoleBinding, "30s", "1s").Should(k8sFixture.ExistByName(), + "Pre-existing ClusterRoleBinding '%s' 
should exist before ArgoCD instance creation", clusterRoleBinding.Name) + + By("Create namespace-scoped ArgoCD instance with principal") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify pre-existing ClusterRole and ClusterRoleBinding are deleted") + + Eventually(clusterRole, "60s", "2s").Should(k8sFixture.NotExistByName(), + "ClusterRole '%s' should be deleted for namespace-scoped ArgoCD instance", clusterRole.Name) + + Eventually(clusterRoleBinding, "60s", "2s").Should(k8sFixture.NotExistByName(), + "ClusterRoleBinding '%s' should be deleted for namespace-scoped ArgoCD instance", clusterRoleBinding.Name) + + By("Delete ArgoCD instance") + Expect(k8sClient.Delete(ctx, argoCD)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCD.Name, Namespace: argoCD.Namespace}, argoCD) + return err != nil + }, "60s", "2s").Should(BeTrue(), "ArgoCD should be deleted") + }) + It("should create principal NetworkPolicy if principal is enabled", func() { By("Create ArgoCD instance with principal enabled") diff --git a/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go index 1daf0001a69..48a57b9bc17 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go @@ -372,7 +372,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Create ArgoCD instance") - argoCD.Spec.ArgoCDAgent.Agent.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.0" + argoCD.Spec.ArgoCDAgent.Agent.Image = common.ArgoCDAgentAgentDefaultImageName Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) By("Verify expected resources are created for agent pod") @@ -383,7 +383,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { container := 
deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentAgentName, *agentDeployment) Expect(container).ToNot(BeNil()) - Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) + Expect(container.Image).To(Equal(common.ArgoCDAgentAgentDefaultImageName)) By("Verify environment variables are set correctly") @@ -451,5 +451,139 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) } }) + + It("should deploy agent via namespace-scoped ArgoCD instance and verify cluster role and cluster role binding are not created", func() { + + By("Create namespace-scoped ArgoCD instance") + + // Create namespace for hosting namespace-scoped ArgoCD instance with agent + nsScoped, cleanupFuncScoped := fixture.CreateNamespaceWithCleanupFunc("argocd-agent-agent-ns-scoped-1-052") + defer cleanupFuncScoped() + + // Update namespace in ArgoCD CR + argoCD.Namespace = nsScoped.Name + + // Update namespace in resource references + serviceAccount.Namespace = nsScoped.Name + role.Namespace = nsScoped.Name + roleBinding.Namespace = nsScoped.Name + agentDeployment.Namespace = nsScoped.Name + clusterRole.Name = fmt.Sprintf("%s-%s-agent-agent", argoCDName, nsScoped.Name) + clusterRoleBinding.Name = fmt.Sprintf("%s-%s-agent-agent", argoCDName, nsScoped.Name) + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify namespace-scoped resources are created for agent") + + Eventually(serviceAccount, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(role, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(roleBinding, "30s", "2s").Should(k8sFixture.ExistByName()) + Eventually(agentDeployment, "30s", "2s").Should(k8sFixture.ExistByName()) + for _, serviceName := range serviceNames { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: nsScoped.Name, + }, + } 
+ Eventually(service, "30s", "2s").Should(k8sFixture.ExistByName(), + "Service '%s' should exist in namespace '%s'", serviceName, nsScoped.Name) + } + + By("Verify ClusterRole and ClusterRoleBinding are not created") + + Consistently(clusterRole, "30s", "1s").Should(k8sFixture.NotExistByName(), + "ClusterRole '%s' should not exist for namespace-scoped ArgoCD instance", clusterRole.Name) + + Consistently(clusterRoleBinding, "30s", "1s").Should(k8sFixture.NotExistByName(), + "ClusterRoleBinding '%s' should not exist for namespace-scoped ArgoCD instance", clusterRoleBinding.Name) + + By("Delete ArgoCD instance") + Expect(k8sClient.Delete(ctx, argoCD)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCD.Name, Namespace: argoCD.Namespace}, argoCD) + return err != nil + }, "60s", "2s").Should(BeTrue(), "ArgoCD should be deleted") + }) + + It("should delete existing cluster role and cluster role binding if ArgoCD instance is namespace-scoped", func() { + + By("Create namespace-scoped ArgoCD instance namespace") + + // Create namespace for hosting namespace-scoped ArgoCD instance with agent + nsScoped, cleanupFuncScoped := fixture.CreateNamespaceWithCleanupFunc("argocd-agent-agent-ns-scoped-1-052") + defer cleanupFuncScoped() + + // Update namespace in ArgoCD CR + argoCD.Namespace = nsScoped.Name + + // Update namespace in resource references + serviceAccount.Namespace = nsScoped.Name + role.Namespace = nsScoped.Name + roleBinding.Namespace = nsScoped.Name + agentDeployment.Namespace = nsScoped.Name + clusterRole.Name = fmt.Sprintf("%s-%s-agent-agent", argoCDName, nsScoped.Name) + clusterRoleBinding.Name = fmt.Sprintf("%s-%s-agent-agent", argoCDName, nsScoped.Name) + + By("Pre-create ClusterRole and ClusterRoleBinding before ArgoCD instance") + + preExistingClusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRole.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "test", + }, + }, + 
} + Expect(k8sClient.Create(ctx, preExistingClusterRole)).To(Succeed()) + + preExistingClusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBinding.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "test", + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: "default", + Namespace: nsScoped.Name, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: clusterRole.Name, + }, + } + Expect(k8sClient.Create(ctx, preExistingClusterRoleBinding)).To(Succeed()) + + By("Verify pre-existing ClusterRole and ClusterRoleBinding exist") + + Eventually(clusterRole, "10s", "1s").Should(k8sFixture.ExistByName(), + "Pre-existing ClusterRole '%s' should exist before ArgoCD instance creation", clusterRole.Name) + Eventually(clusterRoleBinding, "10s", "1s").Should(k8sFixture.ExistByName(), + "Pre-existing ClusterRoleBinding '%s' should exist before ArgoCD instance creation", clusterRoleBinding.Name) + + By("Create namespace-scoped ArgoCD instance with agent") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify pre-existing ClusterRole and ClusterRoleBinding are deleted") + + Eventually(clusterRole, "60s", "2s").Should(k8sFixture.NotExistByName(), + "ClusterRole '%s' should be deleted for namespace-scoped ArgoCD instance", clusterRole.Name) + + Eventually(clusterRoleBinding, "60s", "2s").Should(k8sFixture.NotExistByName(), + "ClusterRoleBinding '%s' should be deleted for namespace-scoped ArgoCD instance", clusterRoleBinding.Name) + + By("Delete ArgoCD instance") + Expect(k8sClient.Delete(ctx, argoCD)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCD.Name, Namespace: argoCD.Namespace}, argoCD) + return err != nil + }, "60s", "2s").Should(BeTrue(), "ArgoCD should be deleted") + }) }) }) diff --git 
a/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go index 63b05440cd7..2a0990e0391 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go @@ -17,8 +17,16 @@ limitations under the License. package sequential import ( + "bufio" "context" + "crypto/tls" + "encoding/json" "fmt" + "io" + "net/http" + "net/url" + "os/exec" + "strings" "time" . "github.com/onsi/ginkgo/v2" @@ -26,20 +34,24 @@ import ( "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" appFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/application" deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" + k8sFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" fixtureUtils "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/argoproj/argo-cd/v3/pkg/apiclient/application" argocdv1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1" "github.com/argoproj/gitops-engine/pkg/health" argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" "github.com/argoproj-labs/argocd-operator/common" + argocdFixture "github.com/argoproj-labs/argocd-operator/tests/ginkgo/fixture/argocd" agentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/agent" ) @@ -111,7 +123,7 @@ const ( namespaceManagedAgent = 
	"ns-hosting-managed-agent"
	namespaceAutonomousAgent = "ns-hosting-autonomous-agent"

	// Namespaces hosting application resources in managed and autonomous clusters (e.g. this is where the deployments etc. are deployed by Argo CD)
	managedAgentApplicationNamespace    = "ns-hosting-app-in-managed-cluster"
	autonomousAgentApplicationNamespace = "ns-hosting-app-in-autonomous-cluster"

	// -- diff hunk boundary: @@ -308,11 +320,198 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { -- intervening file content omitted in patch --

		// (tail of deployAndValidateApplication, shown as hunk context)
		Eventually(application, "180s", "5s").Should(appFixture.HaveHealthStatusCode(health.HealthStatusHealthy), "Application should be healthy")
	}

	// runResourceProxyTest verifies that the principal's resource proxy can fetch live
	// resources from the agent cluster through the Argo CD API:
	//   - a Deployment belonging to the synced app must be retrievable (positive case),
	//   - a non-existent Deployment must produce an error (negative case).
	// RBAC allowing the agent ServiceAccount to read Deployments is created for the
	// duration of the check and removed via the returned cleanup func.
	runResourceProxyTest := func(argoEndpoint string, password string, app argocdv1alpha1.Application, agentK8sClient client.Client, agentInstallNamespace string) {

		// This test is based on test/e2e/rp_test.go from argocd-agent repo

		cleanupFunc := createRBACForResourceProxyTest(agentK8sClient, agentInstallNamespace)
		defer cleanupFunc()

		Eventually(func() bool {

			// Getting an existing resource belonging to the synced app through Argo's
			// API must result in success.
			resource, err := getResourceForResourceProxyTest(argoEndpoint, password, &app,
				"apps", "v1", "Deployment", app.Spec.Destination.Namespace, "spring-petclinic")
			if err != nil {
				GinkgoWriter.Println("error from getResourceForResourceProxyTest", err)
				return false
			}
			// The manifest comes back as a JSON string; decode it into a typed
			// Deployment and confirm it is the expected object.
			napp := &appsv1.Deployment{}
			if err := json.Unmarshal([]byte(resource), napp); err != nil {
				GinkgoWriter.Println("unable to unmarshal resource", err)
				return false
			}
			return napp.Kind == "Deployment" && napp.Name == "spring-petclinic"
		}, "2m", "5s").Should(BeTrue())

		// Getting a non-existing resource must result in failure
		_, err := getResourceForResourceProxyTest(argoEndpoint, password, &app,
			"apps", "v1", "Deployment", app.Spec.Destination.Namespace, "spring-petclinic-backend")
		Expect(err).To(HaveOccurred())
	}

	// runRedisTest is based on redis_proxy_test.go E2E test in argocd-agent
	// - This test will verify argo cd resourcetree API shows child resources (e.g. pods), which is only possible if redis proxy is working as expected.
	// It streams the application's resource-tree events from the principal, deletes the
	// app's pod on the agent cluster, and asserts that the replacement pod both appears
	// in the event stream and in a fresh resource-tree query.
	runRedisTest := func(argoEndpoint string, password string, managedAgent bool, appOnPrincipal argocdv1alpha1.Application, agentK8sClient client.Client) {

		argocdClient, sessionToken, closer, err := argocdFixture.CreateArgoCDAPIClient(context.Background(), argoEndpoint, password)
		Expect(err).ToNot(HaveOccurred())
		defer closer.Close()

		// Note: 'closer' is rebound here; each defer captures the value current at
		// defer time, so both closers are closed on exit.
		closer, appClient, err := argocdClient.NewApplicationClient()
		Expect(err).ToNot(HaveOccurred())
		defer closer.Close()

		cancellableContext, cancelFunc := context.WithCancel(context.Background())
		defer cancelFunc()

		resourceTreeURL := "https://" + argoEndpoint + "/api/v1/stream/applications/" + appOnPrincipal.Name + "/resource-tree?appNamespace=" + appOnPrincipal.Namespace

		// Wait for successful connection to resource tree event source API, on principal Argo CD
		// - this allows us to stream an application's resource change events (e.g. pod created/deleted)
		var msgChan chan string
		Eventually(func() bool {
			var err error
			msgChan, err = argocdFixture.StreamFromArgoCDEventSourceURL(cancellableContext, resourceTreeURL, sessionToken)
			if err != nil {
				GinkgoWriter.Println("streamFromEventSource returned error:", err)
				return false
			}
			return true

		}, 5*time.Minute, 5*time.Second).Should(BeTrue())

		Expect(msgChan).ToNot(BeNil())

		// Pick the namespace where Argo CD deployed the workload, based on mode.
		deploymentNamespace := autonomousAgentApplicationNamespace
		if managedAgent {
			deploymentNamespace = managedAgentApplicationNamespace
		}

		// Find pod (deployed by Argo CD) in agent deployment namespace
		var podList corev1.PodList
		Eventually(func() bool {
			err := agentK8sClient.List(context.Background(), &podList, client.InNamespace(deploymentNamespace))
			if err != nil {
				GinkgoWriter.Println(err)
				return false
			}

			numPods := len(podList.Items)
			// there should be exactly one pod in the namespace
			if numPods != 1 {
				GinkgoWriter.Println("Waiting for 1 pods: ", numPods)
			}
			return numPods == 1

		}, "30s", "5s").Should(BeTrue())

		// Locate the app pod by its name prefix
		var oldPod corev1.Pod
		for idx := range podList.Items {
			pod := podList.Items[idx]
			if strings.Contains(pod.Name, "spring-petclinic") {
				oldPod = pod
				break
			}
		}
		Expect(oldPod.Name).ToNot(BeEmpty())

		// Ensure that the pod appears in the resource tree value returned by Argo CD server (this will only be true if redis proxy is working)
		Eventually(func() bool {
			tree, err := appClient.ResourceTree(context.Background(), &application.ResourcesQuery{
				ApplicationName: &appOnPrincipal.Name,
				AppNamespace:    &appOnPrincipal.Namespace,
			})

			if err != nil {
				GinkgoWriter.Println("error on ResourceTree:", err)
				return false
			}
			if tree == nil {
				GinkgoWriter.Println("tree is nil")
				return false
			}

			for _, node := range tree.Nodes {
				if node.Kind == "Pod" && node.Name == oldPod.Name {
					return true
				}
			}
			return false
		}, time.Second*60, time.Second*5).Should(BeTrue())

		// Delete pod on managed agent cluster
		err = agentK8sClient.Delete(context.Background(), &oldPod)
		Expect(err).ToNot(HaveOccurred())

		// Wait for new pod to be created, to replace the old one that was deleted
		var newPod corev1.Pod
		Eventually(func() bool {
			var podList corev1.PodList
			err := agentK8sClient.List(context.Background(), &podList, client.InNamespace(deploymentNamespace))
			if err != nil {
				GinkgoWriter.Println("error on list:", err)
				return false
			}

			for idx := range podList.Items {
				pod := podList.Items[idx]
				if strings.Contains(pod.Name, "spring-petclinic") && pod.Name != oldPod.Name {
					newPod = pod
					break
				}
			}

			return newPod.Name != ""

		}, time.Second*30, time.Second*5).Should(BeTrue())

		// Verify the name of the new pod exists in what has been sent from the channel (this will only be true if redis proxy subscription is working)
		Eventually(func() bool {
			for {
				// drain channel looking for name of new pod; the non-blocking
				// default arm returns false so Eventually retries as more
				// events arrive
				select {
				case msg := <-msgChan:
					GinkgoWriter.Println("Processing message:", msg)
					if strings.Contains(msg, newPod.Name) {
						GinkgoWriter.Println("new pod name found:", newPod.Name)
						return true
					}
				default:
					return false
				}
			}
		}, time.Second*30, time.Second*5).Should(BeTrue())

		// Ensure that the pod appears in the new resource tree value returned by Argo CD server
		tree, err := appClient.ResourceTree(context.Background(), &application.ResourcesQuery{
			ApplicationName: &appOnPrincipal.Name,
			AppNamespace:    &appOnPrincipal.Namespace,
			Project:         &appOnPrincipal.Spec.Project,
		})
		Expect(err).ToNot(HaveOccurred())
		Expect(tree).ToNot(BeNil())

		matchFound := false
		for _, node := range tree.Nodes {
			if node.Kind == "Pod" && node.Name == newPod.Name {
				matchFound = true
				break
			}
		}
		Expect(matchFound).To(BeTrue())

	}

	// This test verifies that:
	// 1.
A cluster-scoped ArgoCD instance with principal component enabled and a cluster-scoped ArgoCD instance // with agent component enabled are deployed in both "managed" and "autonomous" modes. // 2. Each agent successfully connects to the principal. // 3. Applications can be deployed in both modes, and are verified to be healthy and in sync. + // 4. Redis proxy can be accessed, and it contains data from child resources (e.g. pod), for both managed, and autonomous. + // 5. Resource proxy can be accessed, and it contains data from agent resources. // This validates the core connectivity and basic workflow of agent-principal architecture, including RBAC, connection, and application propagation. It("Should deploy ArgoCD principal and agent instances in both modes and verify they are working as expected", func() { @@ -334,16 +533,57 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Create AppProject for autonomous agent in " + namespaceAutonomousAgent) Expect(k8sClient.Create(ctx, buildAppProjectResource(namespaceAutonomousAgent, argov1beta1api.AgentModeAutonomous))).To(Succeed()) + applicationOfManagedAgent := buildApplicationResource(applicationNameManagedAgent, + managedAgentClusterName, managedAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeManaged) By("Deploy application for managed mode") - deployAndValidateApplication(buildApplicationResource(applicationNameManagedAgent, - managedAgentClusterName, managedAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeManaged)) + deployAndValidateApplication(applicationOfManagedAgent) + + portForwardCleanup := portForward(namespaceAgentPrincipal, "service/argocd-hub-server", "8443:https") + cleanupFuncs = append(cleanupFuncs, portForwardCleanup) + + principalArgocdPassword := argocdFixture.GetInitialAdminSecretPassword(argoCDAgentInstanceNamePrincipal, namespaceAgentPrincipal, k8sClient) + + By("Running resource proxy test for managed") + // The principal's 
application is very similar to 'applicationOfManagedAgent' but in a different namespace, and with a different appproject. + // - Since the resource proxy interfaces with principal, we need to give it an Application that matches the principal-side Application + appOnPrincipal := applicationOfManagedAgent.DeepCopy() + appOnPrincipal.Namespace = managedAgentClusterName + appOnPrincipal.Spec.Project = "agent-app-project" + runResourceProxyTest("127.0.0.1:8443", principalArgocdPassword, *appOnPrincipal, k8sClient, namespaceManagedAgent) + + By("Running redis test for managed") + runRedisTest("127.0.0.1:8443", principalArgocdPassword, true, *applicationOfManagedAgent, k8sClient) + + applicationOfAutonomousAgent := buildApplicationResource(applicationNameAutonomousAgent, + namespaceAutonomousAgent, autonomousAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeAutonomous) By("Deploy application for autonomous mode") - deployAndValidateApplication(buildApplicationResource(applicationNameAutonomousAgent, - namespaceAutonomousAgent, autonomousAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeAutonomous)) + deployAndValidateApplication(applicationOfAutonomousAgent) + + By("Running resource proxy test for autonomous") + // The principal's application is very similar to 'applicationOfAutonomousAgent' but in a different namespace, and with a different appproject.
		// - Since the resource proxy interfaces with principal, we need to give it an Application that matches the principal-side Application
		appOnPrincipal = applicationOfAutonomousAgent.DeepCopy()
		appOnPrincipal.Namespace = autonomousAgentClusterName
		appOnPrincipal.Spec.Project = "autonomous-cluster-in-hub-agent-app-project"
		runResourceProxyTest("127.0.0.1:8443", principalArgocdPassword, *appOnPrincipal, k8sClient, namespaceAutonomousAgent)

		By("Running redis test for autonomous")

		// The principal's application is the same as 'applicationOfAutonomousAgent', but in a different namespace. (The spec isn't needed)
		// - Since the redis proxy interfaces with principal, we need to give it an Application that matches the principal-side Application
		appOnPrincipal = applicationOfAutonomousAgent.DeepCopy()
		appOnPrincipal.Namespace = "autonomous-cluster-in-hub"
		appOnPrincipal.Spec = argocdv1alpha1.ApplicationSpec{}

		runRedisTest("127.0.0.1:8443", principalArgocdPassword, false, *appOnPrincipal, k8sClient)

	})

	AfterEach(func() {

		// Dump diagnostic state from every namespace the test touched, on failure.
		fixture.OutputDebugOnFail(namespaceAgentPrincipal, namespaceManagedAgent, namespaceAutonomousAgent, managedAgentClusterName, autonomousAgentClusterName, managedAgentApplicationNamespace, autonomousAgentApplicationNamespace)

		By("Cleanup cluster-scoped resources")
		_ = k8sClient.Delete(ctx, clusterRolePrincipal)
		_ = k8sClient.Delete(ctx, clusterRoleBindingPrincipal)

	// -- diff hunk boundary: @@ -678,3 +918,214 @@ func buildApplicationResource(...) -- intervening file content omitted in patch --

	// (tail of buildApplicationResource, shown as hunk context)
	}
	return application
}

// portForward launches 'kubectl port-forward -n <namespace> <subject> <port>' as a
// subprocess, streams its stdout/stderr to GinkgoWriter, and blocks until kubectl
// reports "Forwarding from ..." (or fails the test after 60s). It returns a cleanup
// func that kills the kubectl process.
func portForward(namespace string, subject string, port string) func() {

	cmdArgs := []string{"kubectl", "port-forward", "-n", namespace, subject, port}

	GinkgoWriter.Println("executing command:", cmdArgs)

	// #nosec G204
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)

	// Create pipes for stdout and stderr to stream output in real-time
	stdout, err := cmd.StdoutPipe()
	Expect(err).ToNot(HaveOccurred())

	stderr, err := cmd.StderrPipe()
	Expect(err).ToNot(HaveOccurred())

	// Channel to signal when port-forward is ready (after seeing "Forwarding from" messages)
	ready := make(chan struct{})

	// streamOutput reads from a pipe and writes to GinkgoWriter in real-time.
	// It signals readiness when it sees the expected "Forwarding from" message.
	streamOutput := func(pipe io.Reader, signalReady func()) {
		defer GinkgoRecover()

		// 'kubectl port-forward' will print this output indicating it has successfully started port-forwarding:
		// Forwarding from 127.0.0.1:8443 -> 8080
		// Forwarding from [::1]:8443 -> 8080

		scanner := bufio.NewScanner(pipe)
		for scanner.Scan() {
			line := scanner.Text()
			GinkgoWriter.Println("port-forward:", line)

			// Signal ready when we see the first "Forwarding from" message
			if signalReady != nil && strings.HasPrefix(line, "Forwarding from") {
				signalReady()
				signalReady = nil // Only signal once
			}
		}
		if scanErr := scanner.Err(); scanErr != nil {
			GinkgoWriter.Println("port-forward scanner error:", scanErr)
		}
	}

	// Start the command
	err = cmd.Start()
	Expect(err).ToNot(HaveOccurred())

	// Stream stdout (with ready signaling) and stderr in separate goroutines
	go streamOutput(stdout, func() { close(ready) })
	go streamOutput(stderr, nil)

	// Wait for the process to complete in a separate goroutine
	go func() {
		defer GinkgoRecover()

		// "killed" errors are expected when the cleanup func terminates kubectl,
		// so they are not reported.
		err := cmd.Wait()
		if err != nil && !strings.Contains(err.Error(), "killed") && !strings.Contains(err.Error(), "signal: killed") {
			GinkgoWriter.Println("port-forward process error:", err)
		}
	}()

	// Wait for the port-forward to be ready before returning
	select {
	case <-ready:
		GinkgoWriter.Println("port-forward is ready")
	case <-time.After(60 * time.Second):
		Fail("timed out waiting for port-forward to be ready")
	}

	return func() {

		GinkgoWriter.Println("terminating port forward")

		if cmd.Process != nil {
			err := cmd.Process.Kill()
			if err != nil && !strings.Contains(err.Error(), "process already finished") {
				GinkgoWriter.Println("error on process kill:", err)
			}
		}
	}

}

// createRBACForResourceProxyTest creates a ClusterRole/ClusterRoleBinding granting the
// agent's ServiceAccount ("argocd-spoke-agent-agent" in agentInstallNamespace) read
// access to Deployments, deleting any pre-existing copies first so the test starts
// clean. It returns a cleanup func that deletes both objects and waits for them to be
// gone.
func createRBACForResourceProxyTest(agentK8sClient client.Client, agentInstallNamespace string) func() {
	ctx := context.Background()
	resourceProxyClusterRole := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "resource-proxy-e2e-test-cluster-role",
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{"apps"},
				Resources: []string{"deployments"},
				Verbs:     []string{"get", "list", "watch"},
			},
		},
	}
	// Delete a leftover ClusterRole from a previous run, if any; any Get error other
	// than NotFound fails the test.
	resourceProxyClusterRoleGet := resourceProxyClusterRole.DeepCopy()
	if err := agentK8sClient.Get(ctx, client.ObjectKeyFromObject(resourceProxyClusterRoleGet), resourceProxyClusterRoleGet); err == nil {
		Expect(agentK8sClient.Delete(ctx, resourceProxyClusterRoleGet)).To(Succeed())
	} else if !apierrors.IsNotFound(err) {
		Expect(err).ToNot(HaveOccurred())
	}
	Expect(agentK8sClient.Create(ctx, resourceProxyClusterRole)).To(Succeed())

	resourceProxyClusterRoleBinding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "resource-proxy-e2e-test-cluster-role-binding",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      "argocd-spoke-agent-agent",
				Namespace: agentInstallNamespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     resourceProxyClusterRole.Name,
		},
	}
	// Same delete-if-exists dance for the binding.
	resourceProxyClusterRoleBindingGet := resourceProxyClusterRoleBinding.DeepCopy()
	if err := agentK8sClient.Get(ctx, client.ObjectKeyFromObject(resourceProxyClusterRoleBindingGet), resourceProxyClusterRoleBindingGet); err == nil {
		Expect(agentK8sClient.Delete(ctx, resourceProxyClusterRoleBindingGet)).To(Succeed())
	} else if !apierrors.IsNotFound(err) {
		Expect(err).ToNot(HaveOccurred())
	}
	Expect(agentK8sClient.Create(ctx, resourceProxyClusterRoleBinding)).To(Succeed())

	return func() {
		Expect(agentK8sClient.Delete(ctx, resourceProxyClusterRole)).To(Succeed())
		Eventually(resourceProxyClusterRole).Should(k8sFixture.NotExistByName())

		Expect(agentK8sClient.Delete(ctx, resourceProxyClusterRoleBinding)).To(Succeed())
		Eventually(resourceProxyClusterRoleBinding).Should(k8sFixture.NotExistByName())

	}
}

// getResourceForResourceProxyTest fetches a single live resource of the given
// group/version/kind/namespace/name for 'app' through the Argo CD
// /api/v1/applications/{name}/resource endpoint at endpointURL, authenticating with a
// session token obtained from the admin password. It returns the resource manifest as a
// JSON string, or an error on any auth/HTTP/decode failure.
// NOTE: TLS verification is skipped — acceptable only because this is test code
// talking to a self-signed local port-forward.
func getResourceForResourceProxyTest(endpointURL string, password string, app *argocdv1alpha1.Application, group, version, kind, namespace, name string) (string, error) {

	_, sessionToken, closer, err := argocdFixture.CreateArgoCDAPIClient(context.Background(), endpointURL, password)
	if err != nil {
		return "", fmt.Errorf("unable to create argocd api client: %v", err)
	}
	defer closer.Close()

	c := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}

	reqURL := constructURLForResourceProxyTest(endpointURL,
		"appNamespace", app.Namespace,
		"project", app.Spec.Project,
		"namespace", namespace,
		"resourceName", name,
		"group", group,
		"version", version,
		"kind", kind,
	)
	reqURL.Path = fmt.Sprintf("/api/v1/applications/%s/resource", app.Name)

	GinkgoWriter.Println("resourceProxyURL:", *reqURL)

	req := http.Request{Method: http.MethodGet, URL: reqURL, Header: make(http.Header)}

	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", sessionToken))

	resp, err := c.Do(&req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("expected HTTP 200, got %d", resp.StatusCode)
	}
	// The endpoint wraps the resource in {"manifest": "<json string>"}; unwrap it.
	type manifestResponse struct {
		Manifest string `json:"manifest"`
	}
	manifest := &manifestResponse{}
	jsonData, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	err = json.Unmarshal(jsonData, manifest)
	if err != nil {
		return "", err
	}
	return manifest.Manifest, nil
}

// constructURLForResourceProxyTest builds an https URL for 'endpoint' whose query
// string is assembled from params given as alternating key/value pairs. It panics if a
// non-empty odd number of params is supplied (a caller bug).
func constructURLForResourceProxyTest(endpoint string, params ...string) *url.URL {
	u := &url.URL{Scheme: "https", Host: endpoint}
	if len(params)%2 == 0 {
		q := make(url.Values)
		for i := 0; i < len(params)-1; i += 2 {
			q.Add(params[i], params[i+1])
		}
		u.RawQuery = q.Encode()
	} else if len(params) != 0 {
		panic("params must be given in pairs")
	}
	return u
}