test(source): remove flaky log assertions from pod tests #5517
@@ -18,6 +18,8 @@ package source
 import (
 	"context"
+	"fmt"
+	"math/rand"
 	"testing"
 
 	log "github.com/sirupsen/logrus"
@@ -43,7 +45,6 @@ func TestPodSource(t *testing.T) {
 		PodSourceDomain          string
 		expected                 []*endpoint.Endpoint
 		expectError              bool
-		expectedDebugMsgs        []string
 		nodes                    []*corev1.Node
 		pods                     []*corev1.Pod
 	}{
@@ -58,7 +59,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1", "10.0.1.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -108,7 +108,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1", "10.0.1.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -158,7 +157,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"2001:DB8::1", "2001:DB8::2"}, RecordType: endpoint.RecordTypeAAAA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv6(),
 			[]*corev1.Pod{
 				{
@@ -208,7 +206,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"2001:DB8::1", "2001:DB8::2"}, RecordType: endpoint.RecordTypeAAAA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv6(),
 			[]*corev1.Pod{
 				{
@@ -258,7 +255,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"208.1.2.1", "208.1.2.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -311,7 +307,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "b.foo.example.org", Targets: endpoint.Targets{"54.10.11.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			[]*corev1.Node{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -383,7 +378,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			[]string{"skipping pod my-pod2. hostNetwork=false"},
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -433,7 +427,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -483,7 +476,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.b.foo.example.org", Targets: endpoint.Targets{"10.0.1.1"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			[]*corev1.Node{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -526,7 +518,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "my-pod2.example.org", Targets: endpoint.Targets{"192.168.1.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -572,7 +563,6 @@ func TestPodSource(t *testing.T) {
 				{DNSName: "internal.a.foo.example.org", Targets: endpoint.Targets{"208.1.2.1", "208.1.2.2"}, RecordType: endpoint.RecordTypeA},
 			},
 			false,
-			nil,
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -621,7 +611,6 @@ func TestPodSource(t *testing.T) {
 			"",
 			[]*endpoint.Endpoint{},
 			false,
-			[]string{`Get node[missing-node] of pod[my-pod1] error: node "missing-node" not found; ignoring`},
 			nodesFixturesIPv4(),
 			[]*corev1.Pod{
 				{
@@ -654,6 +643,7 @@ func TestPodSource(t *testing.T) {
 				}
 			}
+
 			// Create the pods
 			for _, pod := range tc.pods {
 				pods := kubernetes.CoreV1().Pods(pod.Namespace)
 
@@ -665,25 +655,245 @@ func TestPodSource(t *testing.T) {
 			client, err := NewPodSource(context.TODO(), kubernetes, tc.targetNamespace, tc.compatibility, tc.ignoreNonHostNetworkPods, tc.PodSourceDomain)
 			require.NoError(t, err)
 
-			hook := testutils.LogsUnderTestWithLogLevel(log.DebugLevel, t)
-
 			endpoints, err := client.Endpoints(ctx)
 			if tc.expectError {
 				require.Error(t, err)
 			} else {
 				require.NoError(t, err)
 			}
 
-			if tc.expectedDebugMsgs != nil {
-				for _, expectedMsg := range tc.expectedDebugMsgs {
-					testutils.TestHelperLogContains(expectedMsg, hook, t)
-				}
-			} else {
-				require.Empty(t, hook.AllEntries(), "Expected no debug messages")
-			}
-
 			// Validate returned endpoints against desired endpoints.
 			validateEndpoints(t, endpoints, tc.expected)
 		})
 	}
 }
+
+func TestPodSourceLogs(t *testing.T) {
+	t.Parallel()
> **Review comment:** Do we need `Parallel` for that? Just a comment, not necessarily something to action.
>
> **Reply:** I think it's needed here, because as discussed below, using […]
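The thread above asks why `t.Parallel()` matters. A minimal sketch of the interaction, under the assumption (stated in the PR's own comments) that the logrus logger and its captured entries are process-global; `test.NewGlobal` is logrus's stock capturing hook from `hooks/test`, not part of this PR, and `TestSharedLoggerCollision` is a hypothetical name:

```go
package source_test

import (
	"fmt"
	"math/rand"
	"strings"
	"testing"

	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

func TestSharedLoggerCollision(t *testing.T) {
	// One hook on the global logger: every parallel subtest writes into,
	// and reads from, the same entry slice.
	hook := test.NewGlobal()
	log.SetLevel(log.DebugLevel)

	// Per-run suffix, mirroring the PR: without it, subtest "pod-a" could
	// match an entry emitted by "pod-b", making absence assertions flaky.
	suffix := fmt.Sprintf("%d", rand.Intn(100000))

	for _, name := range []string{"pod-a", "pod-b"} {
		name := name
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			log.Debugf("skipping pod %s-%s. hostNetwork=false", name, suffix)

			// Scan the shared entries for this subtest's uniquely named pod.
			for _, e := range hook.AllEntries() {
				if strings.Contains(e.Message, name+"-"+suffix) {
					return
				}
			}
			t.Errorf("expected a log entry mentioning %s-%s", name, suffix)
		})
	}
}
```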
+
+	// Generate unique pod names to avoid log conflicts across parallel tests.
+	// Since logs are globally shared, using the same pod names could cause
+	// false positives in unexpectedDebugLogs assertions.
+	suffix := fmt.Sprintf("%d", rand.Intn(100000))
+	for _, tc := range []struct {
+		title                    string
+		ignoreNonHostNetworkPods bool
+		pods                     []*corev1.Pod
+		nodes                    []*corev1.Node
+		expectedDebugLogs        []string
+		unexpectedDebugLogs      []string
+	}{
+		{
"pods with hostNetwore=false should be skipped logging", | ||
+			true,
+			[]*corev1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("my-pod1-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							internalHostnameAnnotationKey: "internal.a.foo.example.org",
+							hostnameAnnotationKey:         "a.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: false,
+						NodeName:    "my-node1",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "100.0.1.1",
+					},
+				},
+			},
+			nodesFixturesIPv4(),
+			[]string{fmt.Sprintf("skipping pod my-pod1-%s. hostNetwork=false", suffix)},
+			nil,
+		},
+		{
+			"host network pod on a missing node",
+			true,
+			[]*corev1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("missing-node-pod-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							hostnameAnnotationKey: "a.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: true,
+						NodeName:    "missing-node",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "10.0.1.1",
+					},
+				},
+			},
+			nodesFixturesIPv4(),
+			[]string{
+				fmt.Sprintf(`Get node[missing-node] of pod[missing-node-pod-%s] error: node "missing-node" not found; ignoring`, suffix),
+			},
+			nil,
+		},
+		{
+			"mixed valid and hostNetwork=false pods with missing node",
+			true,
+			[]*corev1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("valid-pod-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							hostnameAnnotationKey: "valid.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: true,
+						NodeName:    "my-node1",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "10.0.1.1",
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("non-hostnet-pod-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							hostnameAnnotationKey: "nonhost.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: false,
+						NodeName:    "my-node2",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "100.0.1.2",
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("missing-node-pod-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							hostnameAnnotationKey: "missing.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: true,
+						NodeName:    "missing-node",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "10.0.1.3",
+					},
+				},
+			},
+			nodesFixturesIPv4(),
+			[]string{
+				fmt.Sprintf("skipping pod non-hostnet-pod-%s. hostNetwork=false", suffix),
+				fmt.Sprintf(`Get node[missing-node] of pod[missing-node-pod-%s] error: node "missing-node" not found; ignoring`, suffix),
+			},
+			[]string{
+				fmt.Sprintf("skipping pod valid-pod-%s. hostNetwork=false", suffix),
+				fmt.Sprintf(`Get node[my-node1] of pod[valid-pod-%s] error: node "my-node1" not found; ignoring`, suffix),
+			},
+		},
+		{
+			"valid pods with hostNetwork=true should not generate logs",
+			true,
+			[]*corev1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("valid-pod-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							hostnameAnnotationKey: "valid.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: true,
+						NodeName:    "my-node1",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "10.0.1.1",
+					},
+				},
+			},
+			nodesFixturesIPv4(),
+			nil,
+			[]string{
+				fmt.Sprintf("skipping pod valid-pod-%s. hostNetwork=false", suffix),
+				fmt.Sprintf(`Get node[my-node1] of pod[valid-pod-%s] error: node "my-node1" not found; ignoring`, suffix),
+			},
+		},
+		{
+			"when ignoreNonHostNetworkPods=false, no skip logs should be generated",
+			false,
+			[]*corev1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("my-pod1-%s", suffix),
+						Namespace: "kube-system",
+						Annotations: map[string]string{
+							internalHostnameAnnotationKey: "internal.a.foo.example.org",
+							hostnameAnnotationKey:         "a.foo.example.org",
+						},
+					},
+					Spec: corev1.PodSpec{
+						HostNetwork: false,
+						NodeName:    "my-node1",
+					},
+					Status: corev1.PodStatus{
+						PodIP: "10.0.1.1",
+					},
+				},
+			},
+			nodesFixturesIPv4(),
+			nil,
+			[]string{
+				fmt.Sprintf("skipping pod my-pod1-%s. hostNetwork=false", suffix),
+			},
+		},
+	} {
+		t.Run(tc.title, func(t *testing.T) {
+			kubernetes := fake.NewClientset()
+
+			ctx := context.Background()
+			// Create the nodes
+			for _, node := range tc.nodes {
+				if _, err := kubernetes.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			// Create the pods
+			for _, pod := range tc.pods {
+				pods := kubernetes.CoreV1().Pods(pod.Namespace)
+
+				if _, err := pods.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			client, err := NewPodSource(ctx, kubernetes, "", "", tc.ignoreNonHostNetworkPods, "")
+			require.NoError(t, err)
+
+			hook := testutils.LogsUnderTestWithLogLevel(log.DebugLevel, t)
+
+			_, err = client.Endpoints(ctx)
+			require.NoError(t, err)
+
|
||
// Check if all expected logs are present in actual logs. | ||
// We don't do an exact match because logs are globally shared, | ||
// making precise comparisons difficult | ||
for _, expectedLog := range tc.expectedDebugLogs { | ||
testutils.TestHelperLogContains(expectedLog, hook, t) | ||
} | ||
|
||
// Check that no unexpected logs are present. | ||
// This ensures that logs are not generated inappropriately. | ||
for _, unexpectedLog := range tc.unexpectedDebugLogs { | ||
testutils.TestHelperLogNotContains(unexpectedLog, hook, t) | ||
} | ||
}) | ||
} | ||
} | ||
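For reference, the assertions above go through external-dns's `testutils` helpers. A hedged sketch of how helpers like `LogsUnderTestWithLogLevel`, `TestHelperLogContains`, and `TestHelperLogNotContains` can be built on logrus's `hooks/test` package — an assumption about the mechanism, not the project's actual implementation:

```go
package testutils

import (
	"strings"
	"testing"

	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

// LogsUnderTestWithLogLevel installs a capturing hook on the global logger,
// raises the level so debug entries are recorded, and restores the previous
// level when the test finishes. (Sketch; signatures inferred from call sites.)
func LogsUnderTestWithLogLevel(level log.Level, t *testing.T) *test.Hook {
	t.Helper()
	previous := log.GetLevel()
	log.SetLevel(level)
	t.Cleanup(func() { log.SetLevel(previous) })
	return test.NewGlobal()
}

// TestHelperLogContains fails the test unless some captured entry contains msg.
func TestHelperLogContains(msg string, hook *test.Hook, t *testing.T) {
	t.Helper()
	for _, entry := range hook.AllEntries() {
		if strings.Contains(entry.Message, msg) {
			return
		}
	}
	t.Errorf("log does not contain %q", msg)
}

// TestHelperLogNotContains fails the test if any captured entry contains msg.
func TestHelperLogNotContains(msg string, hook *test.Hook, t *testing.T) {
	t.Helper()
	for _, entry := range hook.AllEntries() {
		if strings.Contains(entry.Message, msg) {
			t.Errorf("log unexpectedly contains %q", msg)
		}
	}
}
```

Because the hook is global, a containment check (rather than an exact match of all entries) is the only assertion that stays stable when other parallel tests are also logging — which is exactly why the PR moves the log checks into their own `TestPodSourceLogs` with uniquely suffixed pod names.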