254 changes: 232 additions & 22 deletions source/pod_test.go
@@ -18,6 +18,8 @@ package source

import (
"context"
"fmt"
"math/rand"
"testing"

log "github.com/sirupsen/logrus"
@@ -43,7 +45,6 @@ func TestPodSource(t *testing.T) {
PodSourceDomain string
expected []*endpoint.Endpoint
expectError bool
- expectedDebugMsgs []string
nodes []*corev1.Node
pods []*corev1.Pod
}{
@@ -58,7 +59,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1", "10.0.1.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -108,7 +108,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1", "10.0.1.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -158,7 +157,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"2001:DB8::1", "2001:DB8::2"}, RecordType: endpoint.RecordTypeAAAA},
},
false,
- nil,
nodesFixturesIPv6(),
[]*corev1.Pod{
{
@@ -208,7 +206,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"2001:DB8::1", "2001:DB8::2"}, RecordType: endpoint.RecordTypeAAAA},
},
false,
- nil,
nodesFixturesIPv6(),
[]*corev1.Pod{
{
@@ -258,7 +255,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"208.1.2.1", "208.1.2.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -311,7 +307,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "b.foo.example.org", Targets: endpoint.Targets{"54.10.11.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
[]*corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
@@ -383,7 +378,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
},
false,
[]string{"skipping pod my-pod2. hostNetwork=false"},
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -433,7 +427,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -483,7 +476,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.b.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
[]*corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
@@ -526,7 +518,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "my-pod2.example.org", Targets: endpoint.Targets{"192.168.1.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -572,7 +563,6 @@ func TestPodSource(t *testing.T) {
{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"208.1.2.1", "208.1.2.2"}, RecordType: endpoint.RecordTypeA},
},
false,
- nil,
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -621,7 +611,6 @@ func TestPodSource(t *testing.T) {
"",
[]*endpoint.Endpoint{},
false,
- []string{`Get node[missing-node] of pod[my-pod1] error: node "missing-node" not found; ignoring`},
nodesFixturesIPv4(),
[]*corev1.Pod{
{
@@ -654,6 +643,7 @@ func TestPodSource(t *testing.T) {
}
}

// Create the pods
for _, pod := range tc.pods {
pods := kubernetes.CoreV1().Pods(pod.Namespace)

@@ -665,25 +655,245 @@ func TestPodSource(t *testing.T) {
client, err := NewPodSource(context.TODO(), kubernetes, tc.targetNamespace, tc.compatibility, tc.ignoreNonHostNetworkPods, tc.PodSourceDomain)
require.NoError(t, err)

- hook := testutils.LogsUnderTestWithLogLevel(log.DebugLevel, t)

endpoints, err := client.Endpoints(ctx)
if tc.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
}

- if tc.expectedDebugMsgs != nil {
- for _, expectedMsg := range tc.expectedDebugMsgs {
- testutils.TestHelperLogContains(expectedMsg, hook, t)
// Validate returned endpoints against desired endpoints.
validateEndpoints(t, endpoints, tc.expected)
})
}
}

func TestPodSourceLogs(t *testing.T) {
t.Parallel()
Review comment — Contributor:
do we need Parallel for that? Just a comment, no action necessarily needed.

Reply — Contributor (Author):
I think it's needed here, because as discussed below, using t.Parallel at this level is safe and allows us to parallelize the tests.
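
As an aside, here is a minimal sketch of the t.Parallel semantics this thread is discussing (illustrative only, not part of this diff; the test name is hypothetical):

package source

import "testing"

// A top-level test marked with t.Parallel runs concurrently with other
// parallel top-level tests, while its t.Run subtests (which do not call
// t.Parallel themselves) still execute sequentially within the parent,
// so shared global state such as a log hook is not racy inside it.
func TestParallelSemanticsSketch(t *testing.T) {
	t.Parallel() // runs alongside other top-level parallel tests

	for _, name := range []string{"case-a", "case-b"} {
		t.Run(name, func(t *testing.T) {
			// No t.Parallel here: these subtests run one at a time.
		})
	}
}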

// Generate unique pod names to avoid log conflicts across parallel tests.
// Since logs are globally shared, using the same pod names could cause
// false positives in unexpectedDebugLogs assertions.
suffix := fmt.Sprintf("%d", rand.Intn(100000))
for _, tc := range []struct {
title string
ignoreNonHostNetworkPods bool
pods []*corev1.Pod
nodes []*corev1.Node
expectedDebugLogs []string
unexpectedDebugLogs []string
}{
{
"pods with hostNetwore=false should be skipped logging",
true,
[]*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("my-pod1-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
internalHostnameAnnotationKey: "internal.a.foo.example.org",
hostnameAnnotationKey: "a.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: false,
NodeName: "my-node1",
},
Status: corev1.PodStatus{
PodIP: "100.0.1.1",
},
},
},
nodesFixturesIPv4(),
[]string{fmt.Sprintf("skipping pod my-pod1-%s. hostNetwork=false", suffix)},
nil,
},
{
"host network pod on a missing node",
true,
[]*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("missing-node-pod-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
hostnameAnnotationKey: "a.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: true,
NodeName: "missing-node",
},
Status: corev1.PodStatus{
PodIP: "10.0.1.1",
},
},
},
nodesFixturesIPv4(),
[]string{
fmt.Sprintf(`Get node[missing-node] of pod[missing-node-pod-%s] error: node "missing-node" not found; ignoring`, suffix),
},
nil,
},
{
"mixed valid and hostNetwork=false pods with missing node",
true,
[]*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("valid-pod-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
hostnameAnnotationKey: "valid.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: true,
NodeName: "my-node1",
},
Status: corev1.PodStatus{
PodIP: "10.0.1.1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("non-hostnet-pod-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
hostnameAnnotationKey: "nonhost.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: false,
NodeName: "my-node2",
},
Status: corev1.PodStatus{
PodIP: "100.0.1.2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("missing-node-pod-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
hostnameAnnotationKey: "missing.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: true,
NodeName: "missing-node",
},
Status: corev1.PodStatus{
PodIP: "10.0.1.3",
},
},
},
nodesFixturesIPv4(),
[]string{
fmt.Sprintf("skipping pod non-hostnet-pod-%s. hostNetwork=false", suffix),
fmt.Sprintf(`Get node[missing-node] of pod[missing-node-pod-%s] error: node "missing-node" not found; ignoring`, suffix),
},
[]string{
fmt.Sprintf("skipping pod valid-pod-%s. hostNetwork=false", suffix),
fmt.Sprintf(`Get node[my-node1] of pod[valid-pod-%s] error: node "my-node1" not found; ignoring`, suffix),
},
},
{
"valid pods with hostNetwork=true should not generate logs",
true,
[]*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("valid-pod-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
hostnameAnnotationKey: "valid.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: true,
NodeName: "my-node1",
},
Status: corev1.PodStatus{
PodIP: "10.0.1.1",
},
},
},
nodesFixturesIPv4(),
nil,
[]string{
fmt.Sprintf("skipping pod valid-pod-%s. hostNetwork=false", suffix),
fmt.Sprintf(`Get node[my-node1] of pod[valid-pod-%s] error: node "my-node1" not found; ignoring`, suffix),
},
},
{
"when ignoreNonHostNetworkPods=false, no skip logs should be generated",
false,
[]*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("my-pod1-%s", suffix),
Namespace: "kube-system",
Annotations: map[string]string{
internalHostnameAnnotationKey: "internal.a.foo.example.org",
hostnameAnnotationKey: "a.foo.example.org",
},
},
Spec: corev1.PodSpec{
HostNetwork: false,
NodeName: "my-node1",
},
Status: corev1.PodStatus{
PodIP: "10.0.1.1",
},
},
},
nodesFixturesIPv4(),
nil,
[]string{
fmt.Sprintf("skipping pod my-pod1-%s. hostNetwork=false", suffix),
},
},
} {
t.Run(tc.title, func(t *testing.T) {
kubernetes := fake.NewClientset()
ctx := context.Background()
// Create the nodes
for _, node := range tc.nodes {
if _, err := kubernetes.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}

// Create the pods
for _, pod := range tc.pods {
pods := kubernetes.CoreV1().Pods(pod.Namespace)

if _, err := pods.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
- } else {
- require.Empty(t, hook.AllEntries(), "Expected no debug messages")
- }
-
- // Validate returned endpoints against desired endpoints.
- validateEndpoints(t, endpoints, tc.expected)
client, err := NewPodSource(ctx, kubernetes, "", "", tc.ignoreNonHostNetworkPods, "")
require.NoError(t, err)

hook := testutils.LogsUnderTestWithLogLevel(log.DebugLevel, t)

_, err = client.Endpoints(ctx)
require.NoError(t, err)

// Check if all expected logs are present in actual logs.
// We don't do an exact match because logs are globally shared,
// making precise comparisons difficult.
for _, expectedLog := range tc.expectedDebugLogs {
testutils.TestHelperLogContains(expectedLog, hook, t)
}

// Check that no unexpected logs are present.
// This ensures that logs are not generated inappropriately.
for _, unexpectedLog := range tc.unexpectedDebugLogs {
testutils.TestHelperLogNotContains(unexpectedLog, hook, t)
}
})
}
}
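
For local verification, these tests can be run in isolation with the standard Go tooling, e.g. (assuming the repository root as the working directory): go test ./source -run 'TestPodSource|TestPodSourceLogs' -v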