diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml new file mode 100644 index 0000000..aad6236 --- /dev/null +++ b/.github/workflows/unit-tests.yml @@ -0,0 +1,48 @@ +name: Unit Tests +permissions: + contents: read + +on: + push: + +jobs: + unit-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v5 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version: "1.24" + + - name: Create fake kube config + run: | + mkdir -p /home/runner/.kube + cat <<EOF > /home/runner/.kube/config + apiVersion: v1 + kind: Config + preferences: {} + clusters: + - cluster: + server: https://fake-server + certificate-authority-data: FAKE + name: fake-cluster + contexts: + - context: + cluster: fake-cluster + user: fake-user + name: fake-context + current-context: fake-context + users: + - name: fake-user + user: + token: FAKE + EOF + - name: Run Unit Tests + run: | + go clean -testcache && go test -v ./... -coverprofile=coverage.out + go tool cover -func=coverage.out + go tool cover -html=coverage.out -o coverage.html diff --git a/Makefile b/Makefile index a9ec57f..62d0509 100644 --- a/Makefile +++ b/Makefile @@ -6,4 +6,12 @@ nginx-utils: docker buildx build --build-context project=nginx-utils --platform linux/amd64 -t nginx-utils -f nginx-utils/Dockerfile . install: build - sudo cp cmd/kubectl-nginx_supportpkg /usr/local/bin \ No newline at end of file + sudo cp cmd/kubectl-nginx_supportpkg /usr/local/bin + +clean: + rm -f cmd/kubectl-nginx_supportpkg + +test: + go clean -testcache && go test -v ./... -coverprofile=coverage.out + go tool cover -func=coverage.out + go tool cover -html=coverage.out -o coverage.html \ No newline at end of file diff --git a/go.mod b/go.mod index 532f2ba..064ae64 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,11 @@ module github.com/nginxinc/nginx-k8s-supportpkg go 1.24.3 require ( - github.com/mittwald/go-helm-client v0.12.17 + github.com/mittwald/go-helm-client v0.12.18 github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.10.0 + go.uber.org/mock v0.5.0 + helm.sh/helm/v3 v3.18.5 k8s.io/client-go v0.34.0 ) @@ -90,7 +93,6 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect google.golang.org/grpc v1.72.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - helm.sh/helm/v3 v3.18.5 // indirect k8s.io/apiserver v0.34.0 // indirect k8s.io/cli-runtime v0.33.3 // indirect k8s.io/component-base v0.34.0 // indirect @@ -136,5 +138,5 @@ require ( k8s.io/metrics v0.33.3 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect + sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index 350dcc2..7c11316 100644 --- a/go.sum +++ b/go.sum @@ -189,8 +189,8 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mittwald/go-helm-client v0.12.17 h1:PncoE1u3fXuHWLineNDQ4hI5J4uVbMW3JWrtdBR86TI= -github.com/mittwald/go-helm-client v0.12.17/go.mod h1:GQxuPspUcMsxWWDtYzjRdxOAjh3LKADIfgqtUf9mjHk= +github.com/mittwald/go-helm-client v0.12.18 h1:i9cJNv/YC3ZPKUKVNYTlrOO7ZO6YFKE/ak3J5TeYHPU= +github.com/mittwald/go-helm-client v0.12.18/go.mod 
h1:dLl5NkdKCvwKvLIdZzg4MDbxhSKmuimdmM3WpsAzS0I= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= @@ -326,6 +326,8 @@ go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9f go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= diff --git a/pkg/data_collector/data_collector.go b/pkg/data_collector/data_collector.go index 8fbec5a..51c2d86 100644 --- a/pkg/data_collector/data_collector.go +++ b/pkg/data_collector/data_collector.go @@ -35,6 +35,7 @@ import ( helmClient "github.com/mittwald/go-helm-client" "github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds" corev1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" crdClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -53,12 +54,14 @@ type DataCollector struct { Logger *log.Logger LogFile *os.File K8sRestConfig *rest.Config - K8sCoreClientSet *kubernetes.Clientset - K8sCrdClientSet *crdClient.Clientset - K8sMetricsClientSet *metricsClient.Clientset + K8sCoreClientSet kubernetes.Interface + K8sCrdClientSet apiextensionsclientset.Interface + K8sMetricsClientSet metricsClient.Interface K8sHelmClientSet map[string]helmClient.Client ExcludeDBData bool ExcludeTimeSeriesData bool + PodExecutor func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) + QueryCRD func(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) } type Manifest struct { @@ -148,6 +151,8 @@ func NewDataCollector(collector *DataCollector) error { collector.LogFile = logFile collector.Logger = log.New(logFile, "", log.LstdFlags|log.LUTC|log.Lmicroseconds|log.Lshortfile) collector.K8sHelmClientSet = make(map[string]helmClient.Client) + collector.PodExecutor = collector.RealPodExecutor + collector.QueryCRD = collector.RealQueryCRD //Initialize clients collector.K8sRestConfig = config @@ -260,7 +265,7 @@ func (c *DataCollector) WrapUp(product string) (string, error) { return tarballName, nil } -func (c *DataCollector) PodExecutor(namespace string, pod string, container string, command []string, ctx context.Context) ([]byte, error) { +func (c *DataCollector) RealPodExecutor(namespace string, pod string, container string, command []string, ctx context.Context) ([]byte, error) { req := c.K8sCoreClientSet.CoreV1().RESTClient().Post(). Namespace(namespace). Resource("pods"). 
@@ -293,7 +298,7 @@ func (c *DataCollector) PodExecutor(namespace string, pod string, container stri } } -func (c *DataCollector) QueryCRD(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { +func (c *DataCollector) RealQueryCRD(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { schemeGroupVersion := schema.GroupVersion{Group: crd.Group, Version: crd.Version} negotiatedSerializer := scheme.Codecs.WithoutConversion() diff --git a/pkg/data_collector/data_collector_test.go b/pkg/data_collector/data_collector_test.go new file mode 100644 index 0000000..0dbe711 --- /dev/null +++ b/pkg/data_collector/data_collector_test.go @@ -0,0 +1,133 @@ +package data_collector + +import ( + "bytes" + "context" + "io" + "log" + "os" + "path/filepath" + "testing" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" +) + +func TestNewDataCollector_Success(t *testing.T) { + dc := &DataCollector{Namespaces: []string{"default"}} + err := NewDataCollector(dc) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if dc.BaseDir == "" { + t.Error("BaseDir should be set") + } + if dc.Logger == nil { + t.Error("Logger should be set") + } + if dc.LogFile == nil { + t.Error("LogFile should be set") + } + if dc.K8sCoreClientSet == nil { + t.Error("K8sCoreClientSet should be set") + } + if dc.K8sCrdClientSet == nil { + t.Error("K8sCrdClientSet should be set") + } + if dc.K8sMetricsClientSet == nil { + t.Error("K8sMetricsClientSet should be set") + } + if dc.K8sHelmClientSet == nil { + t.Error("K8sHelmClientSet should be set") + } +} + +func TestWrapUp_CreatesTarball(t *testing.T) { + tmpDir := t.TempDir() + logFile, _ := os.Create(filepath.Join(tmpDir, "supportpkg.log")) + dc := &DataCollector{ + BaseDir: tmpDir, + LogFile: logFile, + Logger: log.New(io.Discard, "", 0), + } + product := "nginx" + tarball, err := dc.WrapUp(product) + if err != nil { + t.Fatalf("WrapUp failed: %v", err) + } + if _, err := os.Stat(tarball); err != nil { + t.Errorf("tarball not created: %v", err) + } + _ = os.Remove(tarball) +} + +func TestPodExecutor_StubReturnsOutput(t *testing.T) { + dc := &DataCollector{ + K8sCoreClientSet: fake.NewClientset(), + K8sRestConfig: &rest.Config{}, + } + // Stub the PodExecutor hook: RealPodExecutor needs a live API server, so only the injected function is exercised here + dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) { + return []byte("output"), nil + } + out, err := dc.PodExecutor("default", "pod", "container", []string{"echo", "hello"}, context.TODO()) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if !bytes.Contains(out, []byte("output")) { + t.Errorf("expected output, got %s", string(out)) + } +} + +func TestRealQueryCRD_ReturnsErrorOnInvalidConfig(t *testing.T) { + dc := &DataCollector{ + K8sRestConfig: &rest.Config{}, + } + crd := crds.Crd{Group: "test", Version: "v1", Resource: "foos"} + _, err := dc.RealQueryCRD(crd, "default", context.TODO()) + if err == nil { + t.Error("expected error for invalid config") + } +} + +func TestAllNamespacesExist_AllExist(t *testing.T) { + client := fake.NewClientset(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) + dc := &DataCollector{ + Namespaces: []string{"default"}, + K8sCoreClientSet: client, + Logger: log.New(io.Discard, "", 0), + } + if !dc.AllNamespacesExist() { + t.Error("expected all namespaces to exist") + } +} + +func 
TestAllNamespacesExist_NotExist(t *testing.T) { + client := fake.NewClientset() + dc := &DataCollector{ + Namespaces: []string{"missing"}, + K8sCoreClientSet: client, + Logger: log.New(io.Discard, "", 0), + } + if dc.AllNamespacesExist() { + t.Error("expected namespaces to not exist") + } +} + +func TestWrapUp_ErrorOnLogFileClose(t *testing.T) { + tmpDir := t.TempDir() + logFile, _ := os.Create(filepath.Join(tmpDir, "supportpkg.log")) + logFile.Close() // Already closed + dc := &DataCollector{ + BaseDir: tmpDir, + LogFile: logFile, + Logger: log.New(io.Discard, "", 0), + } + _, err := dc.WrapUp("nginx") + if err == nil { + t.Error("expected error on closing already closed log file") + } +} diff --git a/pkg/jobs/common_job_list.go b/pkg/jobs/common_job_list.go index 6b6ce23..30e9501 100644 --- a/pkg/jobs/common_job_list.go +++ b/pkg/jobs/common_job_list.go @@ -146,7 +146,7 @@ func CommonJobList() []Job { Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { jobResult := JobResult{Files: make(map[string][]byte), Error: nil} for _, namespace := range dc.Namespaces { - result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerPreferredResources() + result, err := dc.K8sCoreClientSet.Discovery().ServerPreferredResources() if err != nil { dc.Logger.Printf("\tCould not retrieve API resources list %s: %v\n", namespace, err) } else { @@ -163,7 +163,7 @@ Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { jobResult := JobResult{Files: make(map[string][]byte), Error: nil} for _, namespace := range dc.Namespaces { - result, err := dc.K8sCoreClientSet.DiscoveryClient.ServerGroups() + result, err := dc.K8sCoreClientSet.Discovery().ServerGroups() if err != nil { dc.Logger.Printf("\tCould not retrieve API versions list %s: %v\n", namespace, err) } else { @@ -367,7 +367,7 @@ Timeout: time.Second * 10, Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { jobResult := JobResult{Files: make(map[string][]byte), Error: nil} - result, err := dc.K8sCoreClientSet.ServerVersion() + result, err := dc.K8sCoreClientSet.Discovery().ServerVersion() if err != nil { dc.Logger.Printf("\tCould not retrieve server version: %v\n", err) } else { diff --git a/pkg/jobs/common_job_list_test.go b/pkg/jobs/common_job_list_test.go new file mode 100644 index 0000000..3bb9a1b --- /dev/null +++ b/pkg/jobs/common_job_list_test.go @@ -0,0 +1,511 @@ +package jobs + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func TestCommonJobList_SelectedJobsProduceFiles(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + jobList := CommonJobList() + + for _, job := range jobList { + + ch := make(chan JobResult, 1) + ctx, cancel := 
context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + go job.Execute(dc, ctx, ch) + + select { + case res := <-ch: + if res.Error != nil { + t.Fatalf("job %s returned unexpected error: %v", job.Name, res.Error) + } + if len(res.Files) == 0 { + t.Fatalf("job %s produced no files", job.Name) + } + // Basic path sanity + non-empty content + for path, content := range res.Files { + if len(content) == 0 { + t.Fatalf("job %s file %s has empty content", job.Name, path) + } + if !strings.HasPrefix(filepath.ToSlash(path), filepath.ToSlash(dc.BaseDir)) { + t.Fatalf("job %s file path %s does not start with basedir %s", job.Name, path, dc.BaseDir) + } + } + case <-ctx.Done(): + t.Fatalf("job %s timed out", job.Name) + } + } +} + +func TestCommonJobList_PodListJSONKeyPresence(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + var podListJob *Job + jobs := CommonJobList() + for i, j := range jobs { + if j.Name == "pod-list" { + podListJob = &jobs[i] + break + } + } + if podListJob == nil { + t.Fatalf("pod-list job not found") + } + + ch := make(chan JobResult, 1) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + go podListJob.Execute(dc, ctx, ch) + + res := <-ch + if res.Error != nil { + t.Fatalf("pod-list job returned error: %v", res.Error) + } + if len(res.Files) != 1 { + t.Fatalf("expected 1 file from pod-list job, got %d", len(res.Files)) + } + for path, content := range res.Files { + if filepath.Base(path) != "pods.json" { + t.Fatalf("expected pods.json file, got %s", path) + } + // Quick check: the JSON should start with '{', since a PodList marshals to an object + if len(content) == 0 || content[0] != '{' { + t.Fatalf("unexpected JSON content in %s", path) + } + } +} + +func TestCommonJobList_PodListError(t *testing.T) { + // Create mock clients + mockClient := fake.NewClientset() + + // Add a reactor that returns an error for pod list operations + mockClient.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("mock API error: pods not available") + }) + + // Setup data collector with error-prone client + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "test-namespace"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: mockClient, + } + + // Gather the pod-list and collect-pods-logs jobs + var podJob []Job + jobs := CommonJobList() + for _, job := range jobs { + if job.Name == "pod-list" || job.Name == "collect-pods-logs" { + podJob = append(podJob, job) + } + } + + ctx := context.Background() + ch := make(chan JobResult, 1) + for _, job := range podJob { + job.Execute(dc, ctx, ch) + + result := <-ch + + // Assertions + logContent := logOutput.String() + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace test-namespace") + assert.Contains(t, logContent, "mock API error: pods not available") + + // No files should be created since API calls failed + assert.Empty(t, result.Files) + assert.Nil(t, result.Error) // The job itself doesn't fail, just logs errors + } +} + +func TestCommonJobList_FileCreation(t *testing.T) { + tests := []struct { + name string + jobName string + jobIndex int + expectedFiles []string + 
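// NOTE: jobIndex hard-codes each case's position in CommonJobList(); keep these indexes in sync if jobs are reordered. +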
setupMockObjects func() []runtime.Object + verifyFileContent func(t *testing.T, files map[string][]byte, tmpDir string) + }{ + { + name: "pod-list creates pods.json files", + jobName: "pod-list", + jobIndex: 0, + expectedFiles: []string{ + "resources/default/pods.json", + "resources/test-ns/pods.json", + }, + setupMockObjects: func() []runtime.Object { + return []runtime.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}, + }, + } + }, + verifyFileContent: func(t *testing.T, files map[string][]byte, tmpDir string) { + // Verify JSON structure + for path, content := range files { + if strings.Contains(path, "pods.json") { + var podList corev1.PodList + err := json.Unmarshal(content, &podList) + assert.NoError(t, err, "Should be valid JSON") + assert.GreaterOrEqual(t, len(podList.Items), 0, "Should contain pod items") + } + } + }, + }, + // Add more test cases for other jobs... + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Setup mock objects + mockObjects := tt.setupMockObjects() + client := fake.NewSimpleClientset(mockObjects...) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "test-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + } + + // Execute the specific job + jobs := CommonJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name) + + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + + // Verify expected number of files + assert.Len(t, result.Files, len(tt.expectedFiles), + "Should create expected number of files") + + // Verify each expected file exists + for _, expectedFile := range tt.expectedFiles { + expectedPath := filepath.Join(tmpDir, expectedFile) + content, exists := result.Files[expectedPath] + assert.True(t, exists, "Expected file should exist: %s", expectedFile) + assert.NotEmpty(t, content, "File content should not be empty: %s", expectedFile) + } + + // Custom content verification + if tt.verifyFileContent != nil { + tt.verifyFileContent(t, result.Files, tmpDir) + } + + // Verify no errors for successful operations + if len(tt.expectedFiles) > 0 { + assert.Nil(t, result.Error, "Should not have errors for successful execution") + } + }) + } +} + +func TestCommonJobList_ResourceListJobs(t *testing.T) { + resourceTests := []struct { + jobName string + jobIndex int + resourceType string + fileName string + setupObjects func() []runtime.Object + }{ + { + jobName: "pod-list", + jobIndex: 0, + resourceType: "pods", + fileName: "pods.json", + setupObjects: func() []runtime.Object { + return []runtime.Object{ + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}}, + } + }, + }, + { + jobName: "service-list", + jobIndex: 9, + resourceType: "services", + fileName: "services.json", + setupObjects: func() []runtime.Object { + return []runtime.Object{ + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + } + }, + }, + { + jobName: "configmap-list", + jobIndex: 8, + resourceType: "configmaps", + fileName: "configmaps.json", + setupObjects: func() []runtime.Object { + return []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}}, + } + }, + }, + } + + for _, tt := range resourceTests { + t.Run(tt.jobName, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + client := 
fake.NewSimpleClientset(tt.setupObjects()...) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + } + + jobs := CommonJobList() + job := jobs[tt.jobIndex] + + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + + // Verify file creation + expectedPath := filepath.Join(tmpDir, "resources/default", tt.fileName) + content, exists := result.Files[expectedPath] + assert.True(t, exists, "Expected %s file should exist", tt.fileName) + assert.NotEmpty(t, content, "File content should not be empty") + + // Verify JSON structure + var jsonData map[string]interface{} + err := json.Unmarshal(content, &jsonData) + assert.NoError(t, err, "Content should be valid JSON") + assert.Contains(t, jsonData, "items", "Should contain 'items' field") + }) + } +} + +func TestCommonJobList_CollectPodsLogs_FileCreation(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + testPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + {Name: "sidecar", Image: "busybox:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(testPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + } + + jobs := CommonJobList() + collectLogsJob := jobs[1] // collect-pods-logs + + ctx := context.Background() + ch := make(chan JobResult, 1) + collectLogsJob.Execute(dc, ctx, ch) + + result := <-ch + + // Expected log files (will fail with fake client, but we can verify the attempt) + expectedLogFiles := []string{ + "logs/default/nginx-pod__nginx.txt", + "logs/default/nginx-pod__sidecar.txt", + } + + // Verify expected file paths would be constructed correctly + for _, expectedFile := range expectedLogFiles { + expectedFullPath := filepath.Join(tmpDir, expectedFile) + _, exists := result.Files[expectedFullPath] + assert.True(t, exists, "Expected file should exist: %s", expectedFile) + } +} + +func TestCommonJobList_ClusterLevelJobs(t *testing.T) { + clusterTests := []struct { + name string + jobName string + jobIndex int + expectedFile string + }{ + { + name: "k8s-version creates version.json", + jobName: "k8s-version", + jobIndex: 18, + expectedFile: "k8s/version.json", + }, + { + name: "clusterroles-info creates clusterroles.json", + jobName: "clusterroles-info", + jobIndex: 20, + expectedFile: "k8s/rbac/clusterroles.json", + }, + { + name: "nodes-info creates nodes.json and platform_info.json", + jobName: "nodes-info", + jobIndex: 22, + expectedFile: "k8s/nodes.json", + }, + } + + for _, tt := range clusterTests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Setup mock objects based on job type + var mockObjects []runtime.Object + if tt.jobName == "nodes-info" { + mockObjects = []runtime.Object{ + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Labels: map[string]string{ + "node-role.kubernetes.io/control-plane": "", + }, + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + OSImage: "Ubuntu 20.04", + OperatingSystem: "linux", + Architecture: "amd64", + }, + }, + }, + } + } + + client := fake.NewSimpleClientset(mockObjects...) 
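+ // The fake clientset serves any seeded objects from an in-memory tracker, so list/get calls succeed without a live API server.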
+ + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + } + + jobs := CommonJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name) + + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + + // Verify main file creation + expectedPath := filepath.Join(tmpDir, tt.expectedFile) + content, exists := result.Files[expectedPath] + assert.True(t, exists, "Expected file should exist: %s", tt.expectedFile) + assert.NotEmpty(t, content, "File content should not be empty") + + // Special case for nodes-info which creates additional platform_info.json + if tt.jobName == "nodes-info" { + platformInfoPath := filepath.Join(tmpDir, "platform_info.json") + platformContent, platformExists := result.Files[platformInfoPath] + assert.True(t, platformExists, "platform_info.json should exist") + assert.NotEmpty(t, platformContent, "Platform info should not be empty") + + // Verify platform info structure + var platformInfo data_collector.PlatformInfo + err := json.Unmarshal(platformContent, &platformInfo) + assert.NoError(t, err, "Platform info should be valid JSON") + assert.NotEmpty(t, platformInfo.PlatformType, "Platform type should be set") + assert.NotEmpty(t, platformInfo.Hostname, "Hostname should be set") + } + }) + } +} + +func TestCommonJobList_AllJobsFileCreation(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + jobs := CommonJobList() + + // Track all created files across all jobs + allFiles := make(map[string][]byte) + + for i, job := range jobs { + t.Run(fmt.Sprintf("job_%d_%s", i, job.Name), func(t *testing.T) { + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + + // Collect files from this job + for path, content := range result.Files { + allFiles[path] = content + } + + // Verify files are within base directory + for filePath := range result.Files { + assert.True(t, strings.HasPrefix(filePath, dc.BaseDir), + "File should be within base directory: %s", filePath) + } + }) + } + + // Verify overall file structure + t.Run("verify_overall_structure", func(t *testing.T) { + // Check that we have files in expected directories + expectedDirs := []string{"resources", "k8s", "k8s/rbac"} + + for _, expectedDir := range expectedDirs { + found := false + for filePath := range allFiles { + if strings.Contains(filePath, expectedDir) { + found = true + break + } + } + assert.True(t, found, "Should have files in directory: %s", expectedDir) + } + + // Verify minimum number of files created + assert.Greater(t, len(allFiles), 10, "Should create multiple files across all jobs") + }) +} diff --git a/pkg/jobs/job_test.go b/pkg/jobs/job_test.go new file mode 100644 index 0000000..b1aeda7 --- /dev/null +++ b/pkg/jobs/job_test.go @@ -0,0 +1,102 @@ +package jobs + +import ( + "context" + "errors" + "os" + "path/filepath" + "testing" + "time" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" +) + +// Test successful job execution and file writing +func TestJobCollect_Success(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + job := Job{ + Name: "test-job", + Timeout: time.Second, + Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { + files := map[string][]byte{ + filepath.Join(dc.BaseDir, "output.txt"): []byte("hello world"), + } + ch <- JobResult{Files: files} + 
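// Job.Collect (exercised below) receives this result and writes each file under dc.BaseDir. +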
}, + } + + err, skipped, _ := job.Collect(dc) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if skipped { + t.Fatalf("expected not skipped") + } + // Check file was written + content, err := os.ReadFile(filepath.Join(dc.BaseDir, "output.txt")) + if err != nil { + t.Fatalf("file not written: %v", err) + } + if string(content) != "hello world" { + t.Fatalf("unexpected file content: %s", string(content)) + } +} + +// Test job skipped scenario +func TestJobCollect_Skipped(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + job := Job{ + Name: "skip-job", + Timeout: time.Second, + Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { + ch <- JobResult{Skipped: true} + }, + } + err, skipped, _ := job.Collect(dc) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if !skipped { + t.Fatalf("expected skipped") + } +} + +// Test job error scenario +func TestJobCollect_Error(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + job := Job{ + Name: "error-job", + Timeout: time.Second, + Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { + ch <- JobResult{Error: errors.New("fail")} + }, + } + err, skipped, _ := job.Collect(dc) + if err == nil || err.Error() != "fail" { + t.Fatalf("expected error 'fail', got %v", err) + } + if skipped { + t.Fatalf("expected not skipped") + } +} + +// Test job timeout scenario +func TestJobCollect_Timeout(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + job := Job{ + Name: "timeout-job", + Timeout: time.Millisecond * 10, + Execute: func(dc *data_collector.DataCollector, ctx context.Context, ch chan JobResult) { + time.Sleep(time.Second) + ch <- JobResult{} + }, + } + err, skipped, _ := job.Collect(dc) + if err == nil { + t.Fatalf("expected timeout error, got nil") + } + if skipped { + t.Fatalf("expected not skipped") + } +} diff --git a/pkg/jobs/ngf_job_list_test.go b/pkg/jobs/ngf_job_list_test.go new file mode 100644 index 0000000..0cec5f1 --- /dev/null +++ b/pkg/jobs/ngf_job_list_test.go @@ -0,0 +1,721 @@ +package jobs + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "path/filepath" + "strings" + "testing" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func TestNGFJobList(t *testing.T) { + jobs := NGFJobList() + if len(jobs) == 0 { + t.Error("expected jobs to be returned") + } + + expectedJobs := []string{"exec-nginx-gateway-version", "exec-nginx-t", "crd-objects"} + if len(jobs) != len(expectedJobs) { + t.Errorf("expected %d jobs, got %d", len(expectedJobs), len(jobs)) + } + + for i, job := range jobs { + if job.Name != expectedJobs[i] { + t.Errorf("expected job name %s, got %s", expectedJobs[i], job.Name) + } + if job.Execute == nil { + t.Errorf("job %s should have Execute function", job.Name) + } + if job.Timeout == 0 { + t.Errorf("job %s should have timeout set", job.Name) + } + } +} + +func TestNGFJobExecNginxGatewayVersion(t *testing.T) { + client := fake.NewClientset(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-gateway-test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: 
"nginx-gateway"}, + }, + }, + }) + + dc := mock.SetupMockDataCollector(t) + dc.K8sCoreClientSet = client + dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) { + return []byte("gateway version output"), nil + } + + jobs := NGFJobList() + var versionJob Job + for _, job := range jobs { + if job.Name == "exec-nginx-gateway-version" { + versionJob = job + break + } + } + + ch := make(chan JobResult, 1) + ctx := context.Background() + + versionJob.Execute(dc, ctx, ch) + result := <-ch + + if result.Error != nil { + t.Errorf("expected no error, got %v", result.Error) + } + + if len(result.Files) == 0 { + t.Error("expected files to be created") + } + + found := false + for filename := range result.Files { + if strings.Contains(filename, "nginx-gateway-version.txt") { + found = true + break + } + } + if !found { + t.Error("expected nginx-gateway-version.txt file to be created") + } +} + +func TestNGFJobExecNginxT(t *testing.T) { + client := fake.NewClientset(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-gateway-test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx"}, + }, + }, + }) + + dc := mock.SetupMockDataCollector(t) + dc.Namespaces = []string{"default"} + dc.K8sCoreClientSet = client + dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) { + return []byte("nginx -t output"), nil + } + dc.Logger = log.New(io.Discard, "", 0) + + jobs := NGFJobList() + var tJob Job + for _, job := range jobs { + if job.Name == "exec-nginx-t" { + tJob = job + break + } + } + + ch := make(chan JobResult, 1) + ctx := context.Background() + + tJob.Execute(dc, ctx, ch) + result := <-ch + + if result.Error != nil { + t.Errorf("expected no error, got %v", result.Error) + } + + if len(result.Files) == 0 { + t.Error("expected files to be created") + } + + found := false + for filename := range result.Files { + if strings.Contains(filename, "nginx-t.txt") { + found = true + break + } + } + if !found { + t.Error("expected nginx-t.txt file to be created") + } +} + +func TestNGFJobList_ExecNginxGatewayVersion_PodListError(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that will return an error for pod listing + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("failed to retrieve pod list") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-gateway"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock output"), nil + }, + } + + // Get the exec-nginx-gateway-version job + jobs := NGFJobList() + var execJob Job + for _, job := range jobs { + if job.Name == "exec-nginx-gateway-version" { + execJob = job + break + } + } + + // Execute the job + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was logged for each namespace + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default: failed to retrieve pod list") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace nginx-gateway: failed to 
retrieve pod list") + + // Verify no files were created since pod listing failed + assert.Empty(t, result.Files, "No files should be created when pod list fails") + assert.Nil(t, result.Error, "Job should not fail, just log the error") +} + +func TestNGFJobList_ExecNginxT_PodListError(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that will return an error for pod listing + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("pod list API error") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"test-namespace"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock nginx config"), nil + }, + } + + // Get the exec-nginx-t job + jobs := NGFJobList() + var execJob Job + for _, job := range jobs { + if job.Name == "exec-nginx-t" { + execJob = job + break + } + } + + // Execute the job + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was logged + assert.Contains(t, logContent, "Could not retrieve pod list for namespace test-namespace: pod list API error") + assert.Empty(t, result.Files, "No files should be created when pod list fails") + assert.Nil(t, result.Error, "Job should not fail, just log the error") +} + +func TestNGFJobList_MultipleNamespaces_PodListErrors(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that returns different errors for different namespaces + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + listAction := action.(k8stesting.ListAction) + namespace := listAction.GetNamespace() + + switch namespace { + case "error-ns1": + return true, nil, fmt.Errorf("network timeout") + case "error-ns2": + return true, nil, fmt.Errorf("permission denied") + default: + // Let other namespaces succeed + return false, nil, nil + } + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"error-ns1", "error-ns2", "success-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock output"), nil + }, + } + + // Test both jobs that have the same error handling pattern + jobs := NGFJobList() + + for _, jobName := range []string{"exec-nginx-gateway-version", "exec-nginx-t"} { + t.Run(jobName, func(t *testing.T) { + var targetJob Job + for _, job := range jobs { + if job.Name == jobName { + targetJob = job + break + } + } + + // Clear log output for this subtest + logOutput.Reset() + + ctx := context.Background() + ch := make(chan JobResult, 1) + targetJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify errors are logged for the failing namespaces + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns1: network timeout") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns2: permission denied") + + // success-ns should not have error logs + assert.NotContains(t, logContent, "Could 
not retrieve pod list for namespace success-ns") + + // No files should be created since no nginx-gateway pods exist in success-ns + assert.Empty(t, result.Files) + assert.Nil(t, result.Error) + }) + } +} + +func TestNGFJobList_PodListError_LogFormat(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("specific error message for testing") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"test-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("output"), nil + }, + } + + jobs := NGFJobList() + execJob := jobs[0] // exec-nginx-gateway-version + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + <-ch + logContent := logOutput.String() + + // Verify the exact log format + expectedLogMessage := "\tCould not retrieve pod list for namespace test-ns: specific error message for testing" + assert.Contains(t, logContent, expectedLogMessage) + + // Verify it starts with tab character for indentation + assert.Contains(t, logContent, "\tCould not retrieve pod list") + + // Verify it contains the namespace and error + assert.Contains(t, logContent, "test-ns") + assert.Contains(t, logContent, "specific error message for testing") +} + +func TestNGFJobList_PodListFailure(t *testing.T) { + tests := []struct { + name string + jobName string + jobIndex int + }{ + { + name: "exec-nginx-gateway-version pod list failure", + jobName: "exec-nginx-gateway-version", + jobIndex: 0, + }, + { + name: "exec-nginx-t pod list failure", + jobName: "exec-nginx-t", + jobIndex: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that will return an error for pod listing + client := fake.NewSimpleClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("failed to retrieve pod list") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-gateway"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock output"), nil + }, + } + + // Get the specific job + jobs := NGFJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name, "Job name should match expected") + + // Execute the job + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was logged for each namespace + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default: failed to retrieve pod list") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace nginx-gateway: failed to retrieve pod list") + + // Verify no files were created since pod listing failed + assert.Empty(t, result.Files, "No files should be created when pod list fails") + assert.Nil(t, result.Error, "Job should not fail, just log the error") + }) + } +} + +func 
TestNGFJobList_PodListFailure_MultipleNamespaces(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that returns different errors for different namespaces + client := fake.NewSimpleClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + listAction := action.(k8stesting.ListAction) + namespace := listAction.GetNamespace() + + switch namespace { + case "error-ns1": + return true, nil, fmt.Errorf("network timeout") + case "error-ns2": + return true, nil, fmt.Errorf("permission denied") + case "error-ns3": + return true, nil, fmt.Errorf("resource not found") + default: + // Let other namespaces succeed (but with no nginx-gateway pods) + return false, nil, nil + } + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"error-ns1", "error-ns2", "error-ns3", "success-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock output"), nil + }, + } + + // Test both jobs that have the same error handling pattern + jobs := NGFJobList() + + for _, jobName := range []string{"exec-nginx-gateway-version", "exec-nginx-t"} { + t.Run(jobName, func(t *testing.T) { + var targetJob Job + for _, job := range jobs { + if job.Name == jobName { + targetJob = job + break + } + } + + // Clear log output for this subtest + logOutput.Reset() + + ctx := context.Background() + ch := make(chan JobResult, 1) + targetJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify errors are logged for the failing namespaces + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns1: network timeout") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns2: permission denied") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns3: resource not found") + + // success-ns should not have error logs + assert.NotContains(t, logContent, "Could not retrieve pod list for namespace success-ns") + + // No files should be created since no nginx-gateway pods exist in success-ns + assert.Empty(t, result.Files) + assert.Nil(t, result.Error) + }) + } +} + +func TestNGFJobList_CommandExecutionFailure(t *testing.T) { + tests := []struct { + name string + jobName string + jobIndex int + expectedCommand []string + expectedContainer string + expectedFileExt string + }{ + { + name: "exec-nginx-gateway-version command failure", + jobName: "exec-nginx-gateway-version", + jobIndex: 0, + expectedCommand: []string{"/usr/bin/gateway", "--help"}, + expectedContainer: "nginx-gateway", + expectedFileExt: "__nginx-gateway-version.txt", + }, + { + name: "exec-nginx-t command failure", + jobName: "exec-nginx-t", + jobIndex: 1, + expectedCommand: []string{"/usr/sbin/nginx", "-T"}, + expectedContainer: "nginx", + expectedFileExt: "__nginx-t.txt", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create nginx-gateway pod for testing + nginxGatewayPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-gateway-deployment-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx-gateway", Image: "nginx-gateway:latest"}, + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + } + + client := 
fake.NewSimpleClientset(nginxGatewayPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + // Verify correct parameters are passed + assert.Equal(t, "default", namespace) + assert.Equal(t, "nginx-gateway-deployment-123", podName) + assert.Equal(t, tt.expectedContainer, containerName) + assert.Equal(t, tt.expectedCommand, command) + + // Return error to test failure path + return nil, fmt.Errorf("command execution failed: %v", command) + }, + } + + // Execute the specific job + jobs := NGFJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name) + + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was set + assert.NotNil(t, result.Error, "Job should have error when command execution fails") + assert.Contains(t, result.Error.Error(), "command execution failed") + + // Verify the error was logged + expectedLogMessage := fmt.Sprintf("Command execution %s failed for pod nginx-gateway-deployment-123 in namespace default", tt.expectedCommand) + assert.Contains(t, logContent, expectedLogMessage) + assert.Contains(t, logContent, "command execution failed") + + // Verify no files were created when command execution fails + assert.Empty(t, result.Files, "No files should be created when command execution fails") + }) + } +} + +func TestNGFJobList_CRDObjects_Success(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-gateway"}, + Logger: log.New(&logOutput, "", 0), + QueryCRD: func(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { + // Mock successful CRD query + mockData := map[string]interface{}{ + "apiVersion": crd.Group + "/" + crd.Version, + "kind": crd.Resource, + "items": []map[string]interface{}{ + { + "metadata": map[string]interface{}{ + "name": "test-" + crd.Resource, + "namespace": namespace, + }, + "spec": map[string]interface{}{ + "host": "example.com", + }, + }, + }, + } + return json.Marshal(mockData) + }, + } + + // Get the crd-objects job + jobs := NGFJobList() + crdJob := jobs[2] // crd-objects is at index 2 + assert.Equal(t, "crd-objects", crdJob.Name) + + ctx := context.Background() + ch := make(chan JobResult, 1) + crdJob.Execute(dc, ctx, ch) + + result := <-ch + + // Verify no errors + assert.Nil(t, result.Error, "Should not have errors for successful CRD collection") + + // Get the expected CRDs from GetNGFCRDList() + expectedCRDs := crds.GetNGFCRDList() + expectedFileCount := len(expectedCRDs) * len(dc.Namespaces) + + // Verify expected number of files created + assert.Len(t, result.Files, expectedFileCount, + "Should create files for each CRD in each namespace") + + // Verify file paths and content for each CRD and namespace + for _, namespace := range dc.Namespaces { + for _, crd := range expectedCRDs { + expectedPath := filepath.Join(tmpDir, "crds", namespace, crd.Resource+".json") + content, exists := result.Files[expectedPath] + + assert.True(t, exists, "File should exist for CRD %s in namespace %s", crd.Resource, namespace) + assert.NotEmpty(t, content, "File content should not be empty") + + // Verify JSON structure + var jsonData map[string]interface{} + err := 
json.Unmarshal(content, &jsonData) + assert.NoError(t, err, "Content should be valid JSON") + assert.Contains(t, jsonData, "items", "Should contain items field") + } + } + + // Verify no error messages in logs + logContent := logOutput.String() + assert.NotContains(t, logContent, "could not be collected", "Should not have CRD collection errors") +} + +func TestNGFJobList_CRDObjects_QueryFailure(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "test-ns"}, + Logger: log.New(&logOutput, "", 0), + QueryCRD: func(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { + // Return different errors based on CRD and namespace + if namespace == "test-ns" && crd.Resource == "nginxgateways" { + return nil, fmt.Errorf("permission denied") + } + if namespace == "default" && crd.Resource == "clientsettingspolicies" { + return nil, fmt.Errorf("resource not found") + } + + // Success for other combinations + mockData := map[string]interface{}{ + "apiVersion": crd.Group + "/" + crd.Version, + "kind": crd.Resource, + "items": []interface{}{}, + } + return json.Marshal(mockData) + }, + } + + jobs := NGFJobList() + crdJob := jobs[2] + + ctx := context.Background() + ch := make(chan JobResult, 1) + crdJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify error logging for specific failures + assert.Contains(t, logContent, "CRD nginxgateways.gateway.nginx.org/v1alpha1 could not be collected in namespace test-ns: permission denied") + assert.Contains(t, logContent, "CRD clientsettingspolicies.gateway.nginx.org/v1alpha1 could not be collected in namespace default: resource not found") + + // Verify successful CRDs still created files (only failures are logged) + expectedCRDs := crds.GetNGFCRDList() + successfulFiles := 0 + + for _, namespace := range dc.Namespaces { + for _, crd := range expectedCRDs { + // Skip the combinations the QueryCRD stub fails on + if (namespace == "test-ns" && crd.Resource == "nginxgateways") || + (namespace == "default" && crd.Resource == "clientsettingspolicies") { + continue + } + + expectedPath := filepath.Join(tmpDir, "crds", namespace, crd.Resource+".json") + _, exists := result.Files[expectedPath] + if exists { + successfulFiles++ + } + } + } + + assert.Greater(t, successfulFiles, 0, "Should have some successful CRD files") + assert.Nil(t, result.Error, "Job should not fail even if some CRDs fail to collect") +} diff --git a/pkg/jobs/ngx_job_list_test.go b/pkg/jobs/ngx_job_list_test.go new file mode 100644 index 0000000..20a36ec --- /dev/null +++ b/pkg/jobs/ngx_job_list_test.go @@ -0,0 +1,709 @@ +package jobs + +import ( + "bytes" + "context" + "fmt" + "log" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func TestNGXJobList_ExecNginxT(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + + // Create a fake pod named "nginx-123" in the "default" namespace + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx"}, + }, + }, + } + dc.K8sCoreClientSet = 
fake.NewClientset(pod) + + // Mock PodExecutor + dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) { + return []byte("nginx -T output"), nil + } + + jobList := NGXJobList() + if len(jobList) != 1 { + t.Fatalf("expected 1 job, got %d", len(jobList)) + } + job := jobList[0] + ch := make(chan JobResult, 1) + go job.Execute(dc, context.Background(), ch) + select { + case result := <-ch: + if result.Error != nil { + t.Fatalf("unexpected error: %v", result.Error) + } + found := false + for file, content := range result.Files { + if !strings.HasSuffix(file, "__nginx-t.txt") { + t.Errorf("unexpected file name: %s", file) + } + if string(content) != "nginx -T output" { + t.Errorf("unexpected file content: %s", string(content)) + } + if !strings.HasPrefix(filepath.ToSlash(file), filepath.ToSlash(dc.BaseDir)) { + t.Errorf("file path %s does not start with tmpDir %s", file, dc.BaseDir) + } + found = true + } + if !found { + t.Errorf("no output file created by job") + } + case <-time.After(time.Second): + t.Fatal("job execution timed out") + } +} + +func TestNGXJobList_ExecNginxT_PodListError(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that will return an error for pod listing + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("failed to retrieve pod list") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-ingress"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock nginx config"), nil + }, + } + + // Get the exec-nginx-t job + jobs := NGXJobList() + execJob := jobs[0] // exec-nginx-t is the only job + + // Execute the job + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was logged for each namespace + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default: failed to retrieve pod list") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace nginx-ingress: failed to retrieve pod list") + + // Verify no files were created since pod listing failed + assert.Empty(t, result.Files, "No files should be created when pod list fails") + assert.Nil(t, result.Error, "Job should not fail, just log the error") +} + +func TestNGXJobList_MultipleNamespaces_PodListErrors(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that returns different errors for different namespaces + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + listAction := action.(k8stesting.ListAction) + namespace := listAction.GetNamespace() + + switch namespace { + case "error-ns1": + return true, nil, fmt.Errorf("network timeout") + case "error-ns2": + return true, nil, fmt.Errorf("permission denied") + case "error-ns3": + return true, nil, fmt.Errorf("resource not found") + default: + // Let other namespaces succeed (but with no nginx pods) + return false, nil, nil + } + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"error-ns1", "error-ns2", 
"error-ns3", "success-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("nginx config output"), nil + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify errors are logged for the failing namespaces + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns1: network timeout") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns2: permission denied") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns3: resource not found") + + // success-ns should not have error logs + assert.NotContains(t, logContent, "Could not retrieve pod list for namespace success-ns") + + // No files should be created since no nginx pods exist in success-ns + assert.Empty(t, result.Files) + assert.Nil(t, result.Error) +} + +func TestNGXJobList_PodListError_LogFormat(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + client := fake.NewClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("specific error message for testing") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"test-ns"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("output"), nil + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + <-ch + logContent := logOutput.String() + + // Verify the exact log format + expectedLogMessage := "\tCould not retrieve pod list for namespace test-ns: specific error message for testing" + assert.Contains(t, logContent, expectedLogMessage) + + // Verify it starts with tab character for indentation + assert.Contains(t, logContent, "\tCould not retrieve pod list") + + // Verify it contains the namespace and error + assert.Contains(t, logContent, "test-ns") + assert.Contains(t, logContent, "specific error message for testing") +} + +func TestNGXJobList_PodListError_vs_ExecutionError(t *testing.T) { + tests := []struct { + name string + podListError error + executionError error + expectPodLog bool + expectExecLog bool + expectedFiles int + }{ + { + name: "pod list fails", + podListError: fmt.Errorf("pod list API error"), + executionError: nil, + expectPodLog: true, + expectExecLog: false, + expectedFiles: 0, + }, + { + name: "pod list succeeds but execution fails", + podListError: nil, + executionError: fmt.Errorf("command execution failed"), + expectPodLog: false, + expectExecLog: true, + expectedFiles: 0, + }, + { + name: "both succeed", + podListError: nil, + executionError: nil, + expectPodLog: false, + expectExecLog: false, + expectedFiles: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create nginx pod for testing + nginxPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-deployment-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", 
Image: "nginx:latest"}, + }, + }, + } + + client := fake.NewClientset(nginxPod) + + if tt.podListError != nil { + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, tt.podListError + }) + } + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + if tt.executionError != nil { + return nil, tt.executionError + } + return []byte("nginx configuration"), nil + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + if tt.expectPodLog { + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default") + } else { + assert.NotContains(t, logContent, "Could not retrieve pod list for namespace default") + } + + if tt.expectExecLog { + assert.Contains(t, logContent, "Command execution") + assert.Contains(t, logContent, "failed for pod") + } else { + assert.NotContains(t, logContent, "Command execution") + } + + assert.Len(t, result.Files, tt.expectedFiles) + }) + } +} + +func TestNGXJobList_ExecNginxT_CreatesExpectedFiles(t *testing.T) { + tests := []struct { + name string + pods []*corev1.Pod + namespaces []string + expectedFiles []string + podExecutor func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) + }{ + { + name: "single nginx pod creates one file", + namespaces: []string{"default"}, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-deployment-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + }, + }, + expectedFiles: []string{ + "exec/default/nginx-deployment-123__nginx-t.txt", + }, + podExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("nginx configuration output"), nil + }, + }, + { + name: "multiple nginx pods create multiple files", + namespaces: []string{"default", "production"}, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-web-1", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-api-2", + Namespace: "production", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + }, + }, + expectedFiles: []string{ + "exec/default/nginx-web-1__nginx-t.txt", + "exec/production/nginx-api-2__nginx-t.txt", + }, + podExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte(fmt.Sprintf("nginx config for %s/%s", namespace, podName)), nil + }, + }, + { + name: "non-nginx pods create no files", + namespaces: []string{"default"}, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "redis-deployment-456", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "redis", Image: "redis:latest"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-db-789", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + 
Containers: []corev1.Container{ + {Name: "postgres", Image: "postgres:latest"}, + }, + }, + }, + }, + expectedFiles: []string{}, // No files expected for non-nginx pods + podExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("should not be called"), nil + }, + }, + { + name: "mixed pods only create files for nginx", + namespaces: []string{"default"}, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-proxy-1", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "redis-cache-2", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "redis", Image: "redis:latest"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-3", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + }, + }, + expectedFiles: []string{ + "exec/default/nginx-proxy-1__nginx-t.txt", + "exec/default/nginx-ingress-3__nginx-t.txt", + }, + podExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte(fmt.Sprintf("nginx config for %s", podName)), nil + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create fake client with test pods + var runtimeObjects []runtime.Object + for _, pod := range tt.pods { + runtimeObjects = append(runtimeObjects, pod) + } + client := fake.NewSimpleClientset(runtimeObjects...) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: tt.namespaces, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: tt.podExecutor, + } + + // Execute the job + jobs := NGXJobList() + execJob := jobs[0] // exec-nginx-t + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + + // Verify the number of files created + assert.Len(t, result.Files, len(tt.expectedFiles), "Number of files should match expected") + + // Verify each expected file is created + for _, expectedFile := range tt.expectedFiles { + expectedPath := filepath.Join(tmpDir, expectedFile) + content, exists := result.Files[expectedPath] + assert.True(t, exists, "Expected file should exist: %s", expectedFile) + assert.NotEmpty(t, content, "File content should not be empty for: %s", expectedFile) + + // Verify content contains expected data + contentStr := string(content) + if strings.Contains(expectedFile, "nginx-web-1") { + assert.Contains(t, contentStr, "default/nginx-web-1") + } else if strings.Contains(expectedFile, "nginx-api-2") { + assert.Contains(t, contentStr, "production/nginx-api-2") + } + } + + // Verify no unexpected files are created + for filePath := range result.Files { + relativePath, err := filepath.Rel(tmpDir, filePath) + assert.NoError(t, err) + assert.Contains(t, tt.expectedFiles, relativePath, "Unexpected file created: %s", relativePath) + } + + // Verify no errors if execution was successful + if len(tt.expectedFiles) > 0 { + assert.Nil(t, result.Error, "Should not have errors for successful execution") + } + }) + } +} + +func TestNGXJobList_ExecNginxT_FileContents(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + nginxPod := &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "nginx-test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(nginxPod) + + expectedConfig := `server { + listen 80; + server_name example.com; + location / { + proxy_pass http://backend; + } +}` + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + // Verify the correct command is passed + assert.Equal(t, []string{"/usr/sbin/nginx", "-T"}, command) + assert.Equal(t, "default", namespace) + assert.Equal(t, "nginx-test-pod", podName) + assert.Equal(t, "nginx", containerName) + + return []byte(expectedConfig), nil + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + + // Verify file is created with correct content + assert.Len(t, result.Files, 1) + + expectedPath := filepath.Join(tmpDir, "exec/default/nginx-test-pod__nginx-t.txt") + content, exists := result.Files[expectedPath] + assert.True(t, exists, "Expected file should exist") + assert.Equal(t, expectedConfig, string(content), "File content should match expected nginx config") +} + +func TestNGXJobList_ExecNginxT_FilePaths(t *testing.T) { + tests := []struct { + name string + podName string + namespace string + expectedPath string + }{ + { + name: "standard pod name", + podName: "nginx-deployment-abc123", + namespace: "default", + expectedPath: "exec/default/nginx-deployment-abc123__nginx-t.txt", + }, + { + name: "pod with dashes", + podName: "nginx-ingress-controller-xyz", + namespace: "ingress-nginx", + expectedPath: "exec/ingress-nginx/nginx-ingress-controller-xyz__nginx-t.txt", + }, + { + name: "short pod name", + podName: "nginx-1", + namespace: "prod", + expectedPath: "exec/prod/nginx-1__nginx-t.txt", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + nginxPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.podName, + Namespace: tt.namespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(nginxPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{tt.namespace}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("nginx config"), nil + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + + // Verify the file path is constructed correctly + expectedFullPath := filepath.Join(tmpDir, tt.expectedPath) + content, exists := result.Files[expectedFullPath] + assert.True(t, exists, "File should exist at expected path: %s", tt.expectedPath) + assert.Equal(t, "nginx config", string(content)) + }) + } +} + +func TestNGXJobList_ExecNginxT_NoFilesOnError(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + nginxPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "default", + }, + Spec: 
corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx", Image: "nginx:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(nginxPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return nil, fmt.Errorf("command execution failed") + }, + } + + jobs := NGXJobList() + execJob := jobs[0] + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + + // Verify no files are created when command execution fails + assert.Empty(t, result.Files, "No files should be created when command execution fails") + assert.NotNil(t, result.Error, "Error should be set when command execution fails") + + // Verify error is logged + logContent := logOutput.String() + assert.Contains(t, logContent, "Command execution") + assert.Contains(t, logContent, "failed for pod nginx-pod") + assert.Contains(t, logContent, "command execution failed") +} diff --git a/pkg/jobs/nic_job_list.go b/pkg/jobs/nic_job_list.go index f28556e..4fd5687 100644 --- a/pkg/jobs/nic_job_list.go +++ b/pkg/jobs/nic_job_list.go @@ -232,7 +232,6 @@ func NICJobList() []Job { } else { jobResult.Files[filepath.Join(dc.BaseDir, fileName)] = jsonBytes } - ch <- jobResult } } } @@ -240,6 +239,7 @@ func NICJobList() []Job { } } } + ch <- jobResult }, }, } diff --git a/pkg/jobs/nic_job_list_test.go b/pkg/jobs/nic_job_list_test.go new file mode 100644 index 0000000..7922946 --- /dev/null +++ b/pkg/jobs/nic_job_list_test.go @@ -0,0 +1,558 @@ +package jobs + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/crds" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +// mockPodExecutor simulates PodExecutor for testing +func mockPodExecutor(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock-output"), nil +} + +// mockQueryCRD simulates QueryCRD for testing +func mockQueryCRD(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { + return json.Marshal(map[string]string{"kind": crd.Resource}) +} + +func TestNICJobList_ExecJobs(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + dc.Namespaces = []string{"test-ns"} + + // Mock PodExecutor and QueryCRD + dc.PodExecutor = mockPodExecutor + dc.QueryCRD = mockQueryCRD + + // Use a real or fake clientset (kubernetes.Interface) + dc.K8sCoreClientSet = fake.NewClientset(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "ingress-pod", Namespace: "test-ns"}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx-ingress", Image: "nginx-ingress:latest"}, + }, + }, + }) + + jobList := NICJobList() + for _, job := range jobList { + ch := make(chan JobResult, 1) + go job.Execute(dc, context.Background(), ch) + select { + case result := <-ch: + if result.Error != nil { + t.Errorf("Job %s returned error: %v", job.Name, result.Error) + } + for file, content := range result.Files { + if 
!strings.HasPrefix(filepath.ToSlash(file), filepath.ToSlash(dc.BaseDir)) { + t.Errorf("File path %s does not start with tmpDir", file) + } + if len(content) == 0 { + t.Errorf("File %s has empty content", file) + } + } + case <-time.After(time.Second): + t.Errorf("Job %s timed out", job.Name) + } + } +} + +func TestNICJobList_CRDObjects(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + dc.Namespaces = []string{"test-ns"} + dc.QueryCRD = mockQueryCRD + + jobList := NICJobList() + var found bool + for _, job := range jobList { + if job.Name == "crd-objects" { + found = true + ch := make(chan JobResult, 1) + go job.Execute(dc, context.Background(), ch) + select { + case result := <-ch: + if result.Error != nil { + t.Errorf("CRD job returned error: %v", result.Error) + } + for file, content := range result.Files { + if !strings.HasPrefix(filepath.ToSlash(file), filepath.ToSlash(dc.BaseDir)) { + t.Errorf("File path %s does not start with tmpDir", file) + } + var out map[string]interface{} + if err := json.Unmarshal(content, &out); err != nil { + t.Errorf("Invalid JSON in file %s: %v", file, err) + } + } + case <-time.After(time.Second): + t.Errorf("CRD job timed out") + } + } + } + if !found { + t.Errorf("crd-objects job not found in NICJobList") + } +} + +func TestNICJobList_CRDObjects_QueryFailure(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-ingress"}, + Logger: log.New(&logOutput, "", 0), + QueryCRD: func(crd crds.Crd, namespace string, ctx context.Context) ([]byte, error) { + // Return different errors based on CRD and namespace + if namespace == "nginx-ingress" && crd.Resource == "virtualservers" { + return nil, fmt.Errorf("permission denied") + } + if namespace == "default" && crd.Resource == "policies" { + return nil, fmt.Errorf("resource not found") + } + + // Success for other combinations + mockData := map[string]interface{}{ + "apiVersion": crd.Group + "/" + crd.Version, + "kind": crd.Resource, + "items": []interface{}{}, + } + return json.Marshal(mockData) + }, + } + + // Get the crd-objects job (index 4) + jobs := NICJobList() + crdJob := jobs[4] + assert.Equal(t, "crd-objects", crdJob.Name) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + ch := make(chan JobResult, 1) + go crdJob.Execute(dc, ctx, ch) + + select { + case result := <-ch: + logContent := logOutput.String() + + // Verify error logging for specific failures + assert.Contains(t, logContent, "CRD virtualservers.k8s.nginx.org/v1 could not be collected in namespace nginx-ingress: permission denied") + assert.Contains(t, logContent, "CRD policies.k8s.nginx.org/v1 could not be collected in namespace default: resource not found") + + // Verify successful CRDs still created files (only failures are logged) + expectedCRDs := crds.GetNICCRDList() + successfulFiles := 0 + + for _, namespace := range dc.Namespaces { + for _, crd := range expectedCRDs { + // Skip the ones we know should fail + if (namespace == "nginx-ingress" && crd.Resource == "virtualservers") || + (namespace == "default" && crd.Resource == "policies") { + continue + } + + expectedPath := filepath.Join(tmpDir, "crds", namespace, crd.Resource+".json") + _, exists := result.Files[expectedPath] + if exists { + successfulFiles++ + } + } + } + + assert.Greater(t, successfulFiles, 0, "Should have some successful CRD files") + assert.Nil(t, result.Error, "Job should not fail even if some CRDs 
fail to collect") + + case <-ctx.Done(): + t.Fatal("CRD job timed out") + } +} + +func TestNICJobList_PodListFailure_AllJobs(t *testing.T) { + tests := []struct { + name string + jobName string + jobIndex int + }{ + { + name: "exec-nginx-ingress-version pod list failure", + jobName: "exec-nginx-ingress-version", + jobIndex: 0, + }, + { + name: "exec-nginx-t pod list failure", + jobName: "exec-nginx-t", + jobIndex: 1, + }, + { + name: "exec-agent-conf pod list failure", + jobName: "exec-agent-conf", + jobIndex: 2, + }, + { + name: "exec-agent-version pod list failure", + jobName: "exec-agent-version", + jobIndex: 3, + }, + { + name: "collect-product-platform-info pod list failure", + jobName: "collect-product-platform-info", + jobIndex: 5, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fmt.Printf("Running subtest: %s\n", tt.name) + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create a fake client that will return an error for pod listing + client := fake.NewSimpleClientset() + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("failed to retrieve pod list") + }) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default", "nginx-ingress"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + return []byte("mock output"), nil + }, + } + + // Get the specific job + jobs := NICJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name, "Job name should match expected") + + // Execute the job + ctx := context.Background() + ch := make(chan JobResult, 1) + job.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + // Verify the error was logged for each namespace + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default: failed to retrieve pod list") + assert.Contains(t, logContent, "Could not retrieve pod list for namespace nginx-ingress: failed to retrieve pod list") + + // Verify no files were created since pod listing failed + assert.Empty(t, result.Files, "No files should be created when pod list fails") + assert.Nil(t, result.Error, "Job should not fail, just log the error") + }) + } +} + +func TestNICJobList_PodListFailure_vs_CommandExecutionFailure(t *testing.T) { + tests := []struct { + name string + podListError error + executionError error + expectPodLog bool + expectExecLog bool + expectedFiles int + }{ + { + name: "pod list fails", + podListError: fmt.Errorf("pod list API error"), + executionError: nil, + expectPodLog: true, + expectExecLog: false, + expectedFiles: 0, + }, + { + name: "pod list succeeds but execution fails", + podListError: nil, + executionError: fmt.Errorf("command execution failed"), + expectPodLog: false, + expectExecLog: true, + expectedFiles: 0, + }, + { + name: "both succeed", + podListError: nil, + executionError: nil, + expectPodLog: false, + expectExecLog: false, + expectedFiles: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create nginx ingress pod for testing + nginxIngressPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-controller-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx-ingress", Image: 
"nginx-ingress:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(nginxIngressPod) + + if tt.podListError != nil { + client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, tt.podListError + }) + } + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + if tt.executionError != nil { + return nil, tt.executionError + } + return []byte("nginx ingress version output"), nil + }, + } + + jobs := NICJobList() + execJob := jobs[0] // exec-nginx-ingress-version + + ctx := context.Background() + ch := make(chan JobResult, 1) + execJob.Execute(dc, ctx, ch) + + result := <-ch + logContent := logOutput.String() + + if tt.expectPodLog { + assert.Contains(t, logContent, "Could not retrieve pod list for namespace default") + } else { + assert.NotContains(t, logContent, "Could not retrieve pod list for namespace default") + } + + if tt.expectExecLog { + assert.Contains(t, logContent, "Command execution") + assert.Contains(t, logContent, "failed for pod") + } else { + assert.NotContains(t, logContent, "Command execution") + } + + assert.Len(t, result.Files, tt.expectedFiles) + + // Verify error state + if tt.executionError != nil { + assert.NotNil(t, result.Error, "Should have error when execution fails") + } else { + assert.Nil(t, result.Error, "Should not have error when execution succeeds") + } + }) + } +} + +func TestNICJobList_CommandExecutionFailure(t *testing.T) { + tests := []struct { + name string + jobName string + jobIndex int + expectedCommand []string + expectedContainer string + expectedFileExt string + }{ + { + name: "exec-nginx-ingress-version command failure", + jobName: "exec-nginx-ingress-version", + jobIndex: 0, + expectedCommand: []string{"./nginx-ingress", "--version"}, + expectedContainer: "nginx-ingress", + expectedFileExt: "__nginx-ingress-version.txt", + }, + { + name: "exec-nginx-t command failure", + jobName: "exec-nginx-t", + jobIndex: 1, + expectedCommand: []string{"/usr/sbin/nginx", "-T"}, + expectedContainer: "nginx-ingress", + expectedFileExt: "__nginx-t.txt", + }, + { + name: "exec-agent-conf command failure", + jobName: "exec-agent-conf", + jobIndex: 2, + expectedCommand: []string{"cat", "/etc/nginx-agent/nginx-agent.conf"}, + expectedContainer: "nginx-ingress", + expectedFileExt: "__nginx-agent.conf", + }, + { + name: "exec-agent-version command failure", + jobName: "exec-agent-version", + jobIndex: 3, + expectedCommand: []string{"/usr/bin/nginx-agent", "--version"}, + expectedContainer: "nginx-ingress", + expectedFileExt: "__nginx-agent-version.txt", + }, + { + name: "collect-product-platform-info command failure", + jobName: "collect-product-platform-info", + jobIndex: 5, + expectedCommand: []string{"./nginx-ingress", "--version"}, + expectedContainer: "nginx-ingress", + expectedFileExt: "__nginx-ingress-version.txt", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + // Create nginx-ingress pod for testing + nginxIngressPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-controller-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx-ingress", Image: "nginx-ingress:latest"}, + }, + }, + } + + 
client := fake.NewSimpleClientset(nginxIngressPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + // Verify correct parameters are passed + assert.Equal(t, "default", namespace) + assert.Equal(t, "nginx-ingress-controller-123", podName) + assert.Equal(t, tt.expectedContainer, containerName) + assert.Equal(t, tt.expectedCommand, command) + + // Return error to test failure path + return nil, fmt.Errorf("command execution failed: %v", command) + }, + } + + // Execute the specific job + jobs := NICJobList() + job := jobs[tt.jobIndex] + assert.Equal(t, tt.jobName, job.Name) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + ch := make(chan JobResult, 1) + go job.Execute(dc, ctx, ch) + + select { + case result := <-ch: + logContent := logOutput.String() + + // Verify the error was set + assert.NotNil(t, result.Error, "Job should have error when command execution fails") + assert.Contains(t, result.Error.Error(), "command execution failed") + + // Verify the error was logged + expectedLogMessage := fmt.Sprintf("Command execution %s failed for pod nginx-ingress-controller-123 in namespace default", tt.expectedCommand) + assert.Contains(t, logContent, expectedLogMessage) + assert.Contains(t, logContent, "command execution failed") + + // Verify no files were created when command execution fails + assert.Empty(t, result.Files, "No files should be created when command execution fails") + + case <-ctx.Done(): + t.Fatalf("Job %s timed out", tt.jobName) + } + }) + } +} + +func TestNICJobList_CollectProductPlatformInfo_JSONMarshalFailure(t *testing.T) { + tmpDir := t.TempDir() + var logOutput bytes.Buffer + + nginxIngressPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-controller", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "nginx-ingress", Image: "nginx-ingress:latest"}, + }, + }, + } + + client := fake.NewSimpleClientset(nginxIngressPod) + + dc := &data_collector.DataCollector{ + BaseDir: tmpDir, + Namespaces: []string{"default"}, + Logger: log.New(&logOutput, "", 0), + K8sCoreClientSet: client, + PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) { + // Return successful command output + return []byte("Version=1.2.3 Commit=abc123"), nil + }, + } + + // Test the collect-product-platform-info job (index 5) + jobs := NICJobList() + job := jobs[5] + assert.Equal(t, "collect-product-platform-info", job.Name) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + ch := make(chan JobResult, 1) + go job.Execute(dc, ctx, ch) + + select { + case result := <-ch: + // This should succeed since JSON marshaling should work + assert.Nil(t, result.Error, "Should not have JSON marshal error with valid data") + assert.Len(t, result.Files, 1, "Should create product_info.json file") + + // Verify product_info.json was created + expectedPath := filepath.Join(tmpDir, "product_info.json") + content, exists := result.Files[expectedPath] + assert.True(t, exists, "product_info.json should exist") + + // Verify JSON structure + var productInfo data_collector.ProductInfo + err := json.Unmarshal(content, &productInfo) + assert.NoError(t, err, "Should be valid JSON") + 
assert.Equal(t, "1.2.3", productInfo.Version) + assert.Equal(t, "abc123", productInfo.Build) + assert.Equal(t, "NGINX Ingress Controller", productInfo.Product) + + case <-ctx.Done(): + t.Fatal("Job timed out") + } +} diff --git a/pkg/jobs/nim_job_list_test.go b/pkg/jobs/nim_job_list_test.go new file mode 100644 index 0000000..79dcce7 --- /dev/null +++ b/pkg/jobs/nim_job_list_test.go @@ -0,0 +1,621 @@ +package jobs + +import ( + "bytes" + "context" + "fmt" + "log" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector" + "github.com/nginxinc/nginx-k8s-supportpkg/pkg/mock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func TestNIMJobList_ExecJobs(t *testing.T) { + dc := mock.SetupMockDataCollector(t) + dc.Namespaces = []string{"default"} + + // Create fake pods for each job type + pods := []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "apigw-123", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "apigw"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "clickhouse-456", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "clickhouse-server"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "core-789", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "core"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "dpm-101", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "dpm"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "integrations-102", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "integrations"}}, + }, + }, + } + + objects := make([]runtime.Object, len(pods)) + for i, pod := range pods { + objects[i] = pod + } + dc.K8sCoreClientSet = fake.NewClientset(objects...) 
+
+	// Mock PodExecutor to return predictable output
+	dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) {
+		return []byte(strings.Join(command, " ")), nil
+	}
+
+	// Run all jobs in NIMJobList
+	for _, job := range NIMJobList() {
+		ch := make(chan JobResult, 1)
+		go job.Execute(dc, context.Background(), ch)
+		select {
+		case result := <-ch:
+			if result.Error != nil {
+				t.Errorf("Job %s returned error: %v", job.Name, result.Error)
+			}
+			for file, content := range result.Files {
+				if !strings.HasPrefix(filepath.ToSlash(file), filepath.ToSlash(dc.BaseDir)) {
+					t.Errorf("File path %s does not start with tmpDir %s", file, dc.BaseDir)
+				}
+				if len(content) == 0 {
+					t.Errorf("File %s has empty content", file)
+				}
+			}
+		case <-time.After(2 * time.Second):
+			t.Errorf("Job %s timed out", job.Name)
+		}
+	}
+}
+
+func TestNIMJobList_ExcludeFlags(t *testing.T) {
+	dc := mock.SetupMockDataCollector(t)
+	dc.Namespaces = []string{"default"}
+	dc.K8sCoreClientSet = fake.NewClientset(&corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "clickhouse-456",
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{Name: "clickhouse-server"}},
+		},
+	})
+	dc.PodExecutor = func(namespace, pod, container string, command []string, ctx context.Context) ([]byte, error) {
+		return []byte("output"), nil
+	}
+	// Test ExcludeTimeSeriesData for exec-clickhouse-data
+	dc.ExcludeTimeSeriesData = true
+	for _, job := range NIMJobList() {
+		if job.Name == "exec-clickhouse-data" {
+			ch := make(chan JobResult, 1)
+			go job.Execute(dc, context.Background(), ch)
+			select {
+			case result := <-ch:
+				if !result.Skipped {
+					t.Errorf("Expected job to be skipped when ExcludeTimeSeriesData is true")
+				}
+			case <-time.After(time.Second):
+				t.Fatal("Job exec-clickhouse-data timed out")
+			}
+		}
+	}
+
+	// Test ExcludeDBData for exec-dqlite-dump
+	dc.ExcludeDBData = true
+	for _, job := range NIMJobList() {
+		if job.Name == "exec-dqlite-dump" {
+			ch := make(chan JobResult, 1)
+			go job.Execute(dc, context.Background(), ch)
+			select {
+			case result := <-ch:
+				if !result.Skipped {
+					t.Errorf("Expected job to be skipped when ExcludeDBData is true")
+				}
+			case <-time.After(time.Second):
+				t.Fatal("Job exec-dqlite-dump timed out")
+			}
+		}
+	}
+}
+
+func TestNIMJobList_PodListErrors(t *testing.T) {
+	tests := []struct {
+		name     string
+		jobName  string
+		jobIndex int
+	}{
+		{
+			name:     "exec-apigw-nginx-t pod list error",
+			jobName:  "exec-apigw-nginx-t",
+			jobIndex: 0,
+		},
+		{
+			name:     "exec-apigw-nginx-version pod list error",
+			jobName:  "exec-apigw-nginx-version",
+			jobIndex: 1,
+		},
+		{
+			name:     "exec-clickhouse-version pod list error",
+			jobName:  "exec-clickhouse-version",
+			jobIndex: 2,
+		},
+		{
+			name:     "exec-clickhouse-data pod list error",
+			jobName:  "exec-clickhouse-data",
+			jobIndex: 3,
+		},
+		{
+			name:     "exec-dqlite-dump pod list error",
+			jobName:  "exec-dqlite-dump",
+			jobIndex: 4,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tmpDir := t.TempDir()
+			var logOutput bytes.Buffer
+
+			// Create a fake client that will return an error for pod listing
+			client := fake.NewClientset()
+			client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, nil, fmt.Errorf("failed to retrieve pod list")
+			})
+
+			dc := &data_collector.DataCollector{
+				BaseDir:               tmpDir,
+				Namespaces:            []string{"default", "nim-system"},
+				Logger:                log.New(&logOutput, "", 0),
+				K8sCoreClientSet:      client,
+				ExcludeTimeSeriesData: false,
+				ExcludeDBData:         false,
+				PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+					return []byte("mock output"), nil
+				},
+			}
+
+			// Get the specific job
+			jobs := NIMJobList()
+			execJob := jobs[tt.jobIndex]
+			assert.Equal(t, tt.jobName, execJob.Name, "Job name should match expected")
+
+			// Execute the job
+			ctx := context.Background()
+			ch := make(chan JobResult, 1)
+			execJob.Execute(dc, ctx, ch)
+
+			result := <-ch
+			logContent := logOutput.String()
+
+			// Verify the error was logged for each namespace
+			assert.Contains(t, logContent, "Could not retrieve pod list for namespace default: failed to retrieve pod list")
+			assert.Contains(t, logContent, "Could not retrieve pod list for namespace nim-system: failed to retrieve pod list")
+
+			// Verify no files were created since pod listing failed
+			assert.Empty(t, result.Files, "No files should be created when pod list fails")
+			assert.Nil(t, result.Error, "Job should not fail, just log the error")
+		})
+	}
+}
+
+func TestNIMJobList_MultipleNamespaces_PodListErrors(t *testing.T) {
+	tmpDir := t.TempDir()
+	var logOutput bytes.Buffer
+
+	// Create a fake client that returns different errors for different namespaces
+	client := fake.NewClientset()
+	client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+		listAction := action.(k8stesting.ListAction)
+		namespace := listAction.GetNamespace()
+
+		switch namespace {
+		case "error-ns1":
+			return true, nil, fmt.Errorf("network timeout")
+		case "error-ns2":
+			return true, nil, fmt.Errorf("permission denied")
+		case "error-ns3":
+			return true, nil, fmt.Errorf("resource not found")
+		default:
+			// Let other namespaces succeed (but with no nim pods)
+			return false, nil, nil
+		}
+	})
+
+	dc := &data_collector.DataCollector{
+		BaseDir:               tmpDir,
+		Namespaces:            []string{"error-ns1", "error-ns2", "error-ns3", "success-ns"},
+		Logger:                log.New(&logOutput, "", 0),
+		K8sCoreClientSet:      client,
+		ExcludeTimeSeriesData: false,
+		ExcludeDBData:         false,
+		PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+			return []byte("mock output"), nil
+		},
+	}
+
+	// Test the first job (exec-apigw-nginx-t)
+	jobs := NIMJobList()
+	execJob := jobs[0]
+
+	ctx := context.Background()
+	ch := make(chan JobResult, 1)
+	execJob.Execute(dc, ctx, ch)
+
+	result := <-ch
+	logContent := logOutput.String()
+
+	// Verify errors are logged for the failing namespaces
+	assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns1: network timeout")
+	assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns2: permission denied")
+	assert.Contains(t, logContent, "Could not retrieve pod list for namespace error-ns3: resource not found")
+
+	// success-ns should not have error logs
+	assert.NotContains(t, logContent, "Could not retrieve pod list for namespace success-ns")
+
+	// No files should be created since no apigw pods exist in success-ns
+	assert.Empty(t, result.Files)
+	assert.Nil(t, result.Error)
+}
+
+func TestNIMJobList_PodListError_LogFormat(t *testing.T) {
+	tmpDir := t.TempDir()
+	var logOutput bytes.Buffer
+
+	client := fake.NewClientset()
+	client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+		return true, nil, fmt.Errorf("specific error message for testing")
+	})
+
+	dc := &data_collector.DataCollector{
+		BaseDir:               tmpDir,
+		Namespaces:            []string{"test-ns"},
+		Logger:                log.New(&logOutput, "", 0),
+		K8sCoreClientSet:      client,
+		ExcludeTimeSeriesData: false,
+		ExcludeDBData:         false,
+		PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+			return []byte("output"), nil
+		},
+	}
+
+	jobs := NIMJobList()
+	execJob := jobs[0] // Test with first job
+
+	ctx := context.Background()
+	ch := make(chan JobResult, 1)
+	execJob.Execute(dc, ctx, ch)
+
+	<-ch
+	logContent := logOutput.String()
+
+	// Verify the exact log format
+	expectedLogMessage := "\tCould not retrieve pod list for namespace test-ns: specific error message for testing"
+	assert.Contains(t, logContent, expectedLogMessage)
+
+	// Verify it starts with a tab character for indentation
+	assert.Contains(t, logContent, "\tCould not retrieve pod list")
+
+	// Verify it contains the namespace and error
+	assert.Contains(t, logContent, "test-ns")
+	assert.Contains(t, logContent, "specific error message for testing")
+}
+
+func TestNIMJobList_ClickhouseData_SkippedVsPodListError(t *testing.T) {
+	tests := []struct {
+		name                  string
+		excludeTimeSeriesData bool
+		podListError          error
+		expectSkipped         bool
+		expectPodListError    bool
+	}{
+		{
+			name:                  "excluded time series data - should skip",
+			excludeTimeSeriesData: true,
+			podListError:          nil,
+			expectSkipped:         true,
+			expectPodListError:    false,
+		},
+		{
+			name:                  "pod list error - should log error",
+			excludeTimeSeriesData: false,
+			podListError:          fmt.Errorf("pod list API error"),
+			expectSkipped:         false,
+			expectPodListError:    true,
+		},
+		{
+			name:                  "normal execution",
+			excludeTimeSeriesData: false,
+			podListError:          nil,
+			expectSkipped:         false,
+			expectPodListError:    false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tmpDir := t.TempDir()
+			var logOutput bytes.Buffer
+
+			client := fake.NewClientset()
+			if tt.podListError != nil {
+				client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, tt.podListError
+				})
+			}
+
+			dc := &data_collector.DataCollector{
+				BaseDir:               tmpDir,
+				Namespaces:            []string{"default"},
+				Logger:                log.New(&logOutput, "", 0),
+				K8sCoreClientSet:      client,
+				ExcludeTimeSeriesData: tt.excludeTimeSeriesData,
+				ExcludeDBData:         false,
+				PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+					return []byte("clickhouse output"), nil
+				},
+			}
+
+			// Get the exec-clickhouse-data job (index 3)
+			jobs := NIMJobList()
+			execJob := jobs[3]
+
+			ctx := context.Background()
+			ch := make(chan JobResult, 1)
+			execJob.Execute(dc, ctx, ch)
+
+			result := <-ch
+			logContent := logOutput.String()
+
+			if tt.expectSkipped {
+				assert.True(t, result.Skipped, "Job should be skipped")
+				assert.Contains(t, logContent, "Skipping clickhouse data dump as ExcludeTimeSeriesData is set to true")
+			} else {
+				assert.False(t, result.Skipped, "Job should not be skipped")
+			}
+
+			if tt.expectPodListError {
+				assert.Contains(t, logContent, "Could not retrieve pod list for namespace default")
+				assert.Contains(t, logContent, "pod list API error")
+			} else if !tt.expectSkipped {
+				assert.NotContains(t, logContent, "Could not retrieve pod list for namespace default")
+			}
+		})
+	}
+}
+
+func TestNIMJobList_DqliteDump_SkippedVsPodListError(t *testing.T) {
+	tests := []struct {
+		name               string
+		excludeDBData      bool
+		podListError       error
+		expectSkipped      bool
+		expectPodListError bool
+	}{
+		{
+			name:               "excluded DB data - should skip",
+			excludeDBData:      true,
+			podListError:       nil,
+			expectSkipped:      true,
+			expectPodListError: false,
+		},
+		{
+			name:               "pod list error - should log error",
+			excludeDBData:      false,
+			podListError:       fmt.Errorf("pod list API error"),
+			expectSkipped:      false,
+			expectPodListError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tmpDir := t.TempDir()
+			var logOutput bytes.Buffer
+
+			client := fake.NewClientset()
+			if tt.podListError != nil {
+				client.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, tt.podListError
+				})
+			}
+
+			dc := &data_collector.DataCollector{
+				BaseDir:               tmpDir,
+				Namespaces:            []string{"default"},
+				Logger:                log.New(&logOutput, "", 0),
+				K8sCoreClientSet:      client,
+				ExcludeTimeSeriesData: false,
+				ExcludeDBData:         tt.excludeDBData,
+				PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+					return []byte("dqlite output"), nil
+				},
+			}
+
+			// Get the exec-dqlite-dump job (index 4)
+			jobs := NIMJobList()
+			execJob := jobs[4]
+
+			ctx := context.Background()
+			ch := make(chan JobResult, 1)
+			execJob.Execute(dc, ctx, ch)
+
+			result := <-ch
+			logContent := logOutput.String()
+
+			if tt.expectSkipped {
+				assert.True(t, result.Skipped, "Job should be skipped")
+				assert.Contains(t, logContent, "Skipping dqlite dump as ExcludeDBData is set to true")
+			} else {
+				assert.False(t, result.Skipped, "Job should not be skipped")
+			}
+
+			if tt.expectPodListError {
+				assert.Contains(t, logContent, "Could not retrieve pod list for namespace default")
+				assert.Contains(t, logContent, "pod list API error")
+			} else if !tt.expectSkipped {
+				assert.NotContains(t, logContent, "Could not retrieve pod list for namespace default")
+			}
+		})
+	}
+}
+
+func TestNIMJobList_CommandExecutionFailure(t *testing.T) {
+	tests := []struct {
+		name              string
+		jobName           string
+		jobIndex          int
+		podNamePattern    string
+		expectedContainer string
+		expectedCommands  [][]string
+	}{
+		{
+			name:              "exec-apigw-nginx-t command failure",
+			jobName:           "exec-apigw-nginx-t",
+			jobIndex:          0,
+			podNamePattern:    "apigw",
+			expectedContainer: "apigw",
+			expectedCommands:  [][]string{{"/usr/sbin/nginx", "-T"}},
+		},
+		{
+			name:              "exec-apigw-nginx-version command failure",
+			jobName:           "exec-apigw-nginx-version",
+			jobIndex:          1,
+			podNamePattern:    "apigw",
+			expectedContainer: "apigw",
+			expectedCommands:  [][]string{{"/usr/sbin/nginx", "-v"}},
+		},
+		{
+			name:              "exec-clickhouse-version command failure",
+			jobName:           "exec-clickhouse-version",
+			jobIndex:          2,
+			podNamePattern:    "clickhouse",
+			expectedContainer: "clickhouse-server",
+			expectedCommands:  [][]string{{"clickhouse-server", "--version"}},
+		},
+		{
+			name:              "exec-dqlite-dump command failure",
+			jobName:           "exec-dqlite-dump",
+			jobIndex:          4,
+			podNamePattern:    "core",
+			expectedContainer: "core",
+			expectedCommands: [][]string{
+				{"/etc/nms/scripts/dqlite-backup", "-n", "core", "-c", "/etc/nms/nms.conf", "-a", "0.0.0.0:7891", "-o", "/tmp/core.sql", "-k"},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tmpDir := t.TempDir()
+			var logOutput bytes.Buffer
+
+			// Create pod matching the pattern
+			pod := &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      tt.podNamePattern + "-test-pod",
+					Namespace: "default",
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{Name: tt.expectedContainer, Image: "test:latest"},
+					},
+				},
+			}
+
+			client := fake.NewClientset(pod)
+
+			var executedCommands [][]string
+			dc := &data_collector.DataCollector{
+				BaseDir:          tmpDir,
+				Namespaces:       []string{"default"},
+				Logger:           log.New(&logOutput, "", 0),
+				K8sCoreClientSet: client,
+				PodExecutor: func(namespace, podName, containerName string, command []string, ctx context.Context) ([]byte, error) {
+					// Track executed commands
+					executedCommands = append(executedCommands, command)
+
+					// Verify correct parameters
+					assert.Equal(t, "default", namespace)
+					assert.Equal(t, tt.podNamePattern+"-test-pod", podName)
+					assert.Equal(t, tt.expectedContainer, containerName)
+
+					// Return error to test failure path
+					return nil, fmt.Errorf("command execution failed: %v", command)
+				},
+			}
+
+			// Execute the specific job
+			jobs := NIMJobList()
+			job := jobs[tt.jobIndex]
+			assert.Equal(t, tt.jobName, job.Name)
+
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			ch := make(chan JobResult, 1)
+			go job.Execute(dc, ctx, ch)
+
+			select {
+			case result := <-ch:
+				logContent := logOutput.String()
+
+				// Verify the error was set
+				assert.NotNil(t, result.Error, "Job should have error when command execution fails")
+				assert.Contains(t, result.Error.Error(), "command execution failed")
+
+				// Verify the error was logged
+				assert.Contains(t, logContent, "Command execution")
+				assert.Contains(t, logContent, "failed for pod")
+				assert.Contains(t, logContent, "command execution failed")
+
+				// Verify no files were created when command execution fails
+				assert.Empty(t, result.Files, "No files should be created when command execution fails")
+
+				// Verify expected commands were called
+				assert.NotEmpty(t, executedCommands, "Should have executed commands")
+
+			case <-ctx.Done():
+				t.Fatalf("Job %s timed out", tt.jobName)
+			}
+		})
+	}
+}
diff --git a/pkg/mock/mock_data_collector.go b/pkg/mock/mock_data_collector.go
new file mode 100644
index 0000000..3a6a0c6
--- /dev/null
+++ b/pkg/mock/mock_data_collector.go
@@ -0,0 +1,114 @@
+package mock
+
+import (
+	"bytes"
+	"embed"
+	"io"
+	"log"
+	"testing"
+
+	helmclient "github.com/mittwald/go-helm-client"
+	mockHelmClient "github.com/mittwald/go-helm-client/mock"
+	"github.com/nginxinc/nginx-k8s-supportpkg/pkg/data_collector"
+	"go.uber.org/mock/gomock"
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/release"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
+	"sigs.k8s.io/yaml"
+)
+
+//go:embed testdata/crds.yaml
+//go:embed testdata/objects.yaml
+var testDataFS embed.FS
+
+func loadObjectsFromYAML(filename string) ([]runtime.Object, error) {
+	data, err := testDataFS.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var objects []runtime.Object
+
+	// Split the multi-document YAML on document separators
+	docs := bytes.Split(data, []byte("\n---"))
+
+	for _, doc := range docs {
+		doc = bytes.TrimSpace(doc)
+		if len(doc) == 0 {
+			continue
+		}
+
+		// Use the universal deserializer directly
+		obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(doc, nil, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		if obj != nil {
+			objects = append(objects, obj)
+		}
+	}
+	return objects, nil
+}
+
+func SetupMockDataCollector(t *testing.T) *data_collector.DataCollector {
+	t.Helper()
+
+	tmpDir := t.TempDir()
+
+	// Load objects from YAML instead of hardcoding
+	objs, err := loadObjectsFromYAML("testdata/objects.yaml")
+	if err != nil {
+		t.Fatalf("Failed to load test objects: %v", err)
+	}
+
+	client := fake.NewClientset(objs...)
+
+	// Mock rest.Config
+	restConfig := &rest.Config{
+		Host: "https://mock-k8s-server",
+	}
+
+	// Load the CRD fixture from the embedded test data
+	data, err := testDataFS.ReadFile("testdata/crds.yaml")
+	if err != nil {
+		t.Fatalf("failed to read embedded testdata/crds.yaml: %v", err)
+	}
+
+	crd := &apiextensionsv1.CustomResourceDefinition{}
+	if err := yaml.Unmarshal(data, crd); err != nil {
+		t.Fatalf("failed to unmarshal CRD from testdata/crds.yaml: %v", err)
+	}
+
+	crdClient := apiextensionsfake.NewClientset(crd)
+	metricsClient := metricsfake.NewSimpleClientset()
+
+	ctrl := gomock.NewController(t)
+	// NewController(t) finishes the controller via t.Cleanup; finishing it here would be premature.
+	helmClient := mockHelmClient.NewMockClient(ctrl)
+
+	helmClient.EXPECT().GetSettings().Return(&cli.EnvSettings{}).AnyTimes()
+	var mockedRelease = release.Release{
+		Name:      "test",
+		Namespace: "test",
+		Manifest:  "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: example-config\n  namespace: default\ndata:\n  key: value\n",
+	}
+	helmClient.EXPECT().ListDeployedReleases().Return([]*release.Release{&mockedRelease}, nil).AnyTimes()
+
+	return &data_collector.DataCollector{
+		BaseDir:             tmpDir,
+		Namespaces:          []string{"default"},
+		Logger:              log.New(io.Discard, "", 0),
+		K8sCoreClientSet:    client,
+		K8sCrdClientSet:     crdClient,
+		K8sRestConfig:       restConfig,
+		K8sMetricsClientSet: metricsClient,
+		K8sHelmClientSet:    map[string]helmclient.Client{"default": helmClient},
+	}
+}
diff --git a/pkg/mock/testdata/crds.yaml b/pkg/mock/testdata/crds.yaml
new file mode 100644
index 0000000..59310fc
--- /dev/null
+++ b/pkg/mock/testdata/crds.yaml
@@ -0,0 +1,23 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: testcrds.example.com
+spec:
+  group: example.com
+  names:
+    kind: TestCRD
+    plural: testcrds
+    singular: testcrd
+  scope: Namespaced
+  versions:
+    - name: v1
+      served: true
+      storage: true
+      schema:
+        openAPIV3Schema:
+          type: object
+          properties:
+            spec:
+              type: object
+            status:
+              type: object
\ No newline at end of file
diff --git a/pkg/mock/testdata/objects.yaml b/pkg/mock/testdata/objects.yaml
new file mode 100644
index 0000000..80dd18c
--- /dev/null
+++ b/pkg/mock/testdata/objects.yaml
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-1
+  namespace: default
+spec:
+  containers:
+  - name: c1
+    image: nginx:latest
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: svc-1
+  namespace: default
+spec:
+  selector:
+    app: demo
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dep-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: demo
+  template:
+    metadata:
+      labels:
+        app: demo
+    spec:
+      containers:
+      - name: dep-c1
+        image: nginx:latest
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: role-1
+  namespace: default
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get"]
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cm-1
+  namespace: default
+data:
+  k: v
\ No newline at end of file