10 changes: 10 additions & 0 deletions .buildkite/testsuite.yml
@@ -28,6 +28,8 @@ steps:
queue: k8s-m6id12xlarge
command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:unit
env:
BUILD_GOARCH: amd64
BUILD_GOOS: linux
LOG_LEVEL: trace
OTLP_DIR: /work/artifacts
OTLP_METRIC_INTERVAL: 5s
@@ -73,6 +75,8 @@ steps:
queue: k8s-m6id12xlarge
command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:integration
env:
BUILD_GOARCH: amd64
BUILD_GOOS: linux
LOG_LEVEL: trace
OTLP_DIR: /work/artifacts
OTLP_METRIC_INTERVAL: 5s
@@ -121,6 +125,8 @@ steps:
queue: k8s-m6id12xlarge
command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:acceptance
env:
BUILD_GOARCH: amd64
BUILD_GOOS: linux
LOG_LEVEL: trace
OTLP_DIR: /work/artifacts
OTLP_METRIC_INTERVAL: 5s
@@ -169,6 +175,8 @@ steps:
queue: k8s-m6id12xlarge
command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:kuttl-v1
env:
BUILD_GOARCH: amd64
BUILD_GOOS: linux
LOG_LEVEL: trace
OTLP_DIR: /work/artifacts
OTLP_METRIC_INTERVAL: 5s
@@ -218,6 +226,8 @@ steps:
queue: k8s-m6id12xlarge
command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:kuttl-v1-nodepools
env:
BUILD_GOARCH: amd64
BUILD_GOOS: linux
LOG_LEVEL: trace
OTLP_DIR: /work/artifacts
OTLP_METRIC_INTERVAL: 5s
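The new BUILD_GOARCH/BUILD_GOOS variables pin the build target for every test step. Their consumer lives in the Taskfile and CI scripts rather than in this diff, but the usual pattern is to forward them to the Go toolchain; a minimal sketch under that assumption (the mapping to GOOS/GOARCH is not confirmed by this PR):

    # Assumed consumer: forward the Buildkite env vars to the Go toolchain,
    # falling back to the host platform when they are unset.
    GOOS="${BUILD_GOOS:-$(go env GOOS)}" \
    GOARCH="${BUILD_GOARCH:-$(go env GOARCH)}" \
    go build ./...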
37 changes: 33 additions & 4 deletions Taskfile.yml
@@ -264,8 +264,16 @@ tasks:
_PKG:
sh: go work edit -json | jq -j '.Use.[].DiskPath + "/... "'
PKG: '{{ .PKG | default ._PKG }}'
# When using gotestsum with --packages, packages must be a single
# quoted arg and -- separates gotestsum flags from go test args.
_USE_PACKAGES: '{{if contains "--packages" .GO_TEST_RUNNER}}true{{end}}'
cmds:
- '{{.GO_TEST_RUNNER}} {{.PKG}} {{.CLI_ARGS}}'
- |
{{- if ._USE_PACKAGES}}
{{.GO_TEST_RUNNER}} "{{.PKG}}" -- {{.CLI_ARGS}}
{{- else}}
{{.GO_TEST_RUNNER}} {{.PKG}} {{.CLI_ARGS}}
{{- end}}
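To make the comment above concrete, the two branches render roughly as follows, assuming GO_TEST_RUNNER is either plain `go test` or gotestsum invoked with `--packages` (package paths and test names here are illustrative only):

    # Plain runner: packages and extra args are passed straight through.
    go test ./operator/... ./pkg/... -run TestExample -timeout 60m

    # gotestsum with --packages: the package list is one quoted argument,
    # and everything after "--" is forwarded to "go test".
    gotestsum --packages "./operator/... ./pkg/..." -- -run TestExample -timeout 60m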

test:integration:
desc: "Run all integration tests (~90m)"
@@ -284,7 +292,7 @@
vars:
GO_TEST_RUNNER:
ref: .GO_TEST_RUNNER
CLI_ARGS: '{{.CLI_ARGS}} -p=1 -run {{.RUN}} -timeout 60m -tags integration'
CLI_ARGS: '{{.CLI_ARGS}} -run {{.RUN}} -timeout 60m -tags integration'

test:acceptance:
desc: "Run all acceptance tests (~90m)"
@@ -330,6 +338,10 @@ tasks:
TEST_KUBE_VERSION: '{{ .TEST_KUBE_VERSION | default .DEFAULT_TEST_KUBE_VERSION }}'
TEST_COREDNS_VERSION: '{{ .TEST_COREDNS_VERSION | default .DEFAULT_TEST_COREDNS_VERSION }}'
IMAGES:
# k3d infrastructure images — pre-pulling avoids slow pulls during cluster creation.
- rancher/k3s:v1.32.13-k3s1
- ghcr.io/k3d-io/k3d-tools:5.8.3
- ghcr.io/k3d-io/k3d-proxy:5.8.3
- quay.io/jetstack/cert-manager-controller:{{.TEST_CERTMANAGER_VERSION}}
- quay.io/jetstack/cert-manager-cainjector:{{.TEST_CERTMANAGER_VERSION}}
- quay.io/jetstack/cert-manager-startupapicheck:{{.TEST_CERTMANAGER_VERSION}}
@@ -339,20 +351,37 @@
- quay.io/jetstack/cert-manager-webhook:{{.SECOND_TEST_CERTMANAGER_VERSION}}
- '{{.TEST_REDPANDA_REPO}}:{{.TEST_REDPANDA_VERSION}}'
- '{{.DEFAULT_TEST_UPGRADE_REDPANDA_REPO}}:{{.TEST_UPGRADE_REDPANDA_VERSION}}'
- redpandadata/redpanda-operator:v25.1.3
- redpandadata/redpanda-operator:v25.2.2
- redpandadata/redpanda-operator:v25.3.1
- redpandadata/redpanda-operator:{{.TEST_UPGRADE_OPERATOR_VERSION}}
- ghcr.io/loft-sh/vcluster-pro:{{.TEST_VCLUSTER_VERSION}}
- registry.k8s.io/kube-controller-manager:{{.TEST_KUBE_VERSION}}
- registry.k8s.io/kube-apiserver:{{.TEST_KUBE_VERSION}}
- coredns/coredns:{{.TEST_COREDNS_VERSION}}
- redpandadata/redpanda-unstable:v24.3.1-rc4
- redpandadata/redpanda-unstable:v24.3.1-rc8
- redpandadata/redpanda-unstable:v25.2.1-rc7
- redpandadata/redpanda-unstable:v25.3.1-rc2
- redpandadata/redpanda-unstable:v25.3.1-rc4
- redpandadata/redpanda-unstable:v26.1.1-rc5
- redpandadata/redpanda-nightly:v0.0.0-20260330git0d4187b
- redpandadata/redpanda-operator-nightly:v0.0.0-20250129gita89e202
- redpandadata/redpanda:v23.2.8
- redpandadata/redpanda:v24.2.9
- redpandadata/redpanda:v25.1.1
- redpandadata/redpanda:v25.2.1
- redpandadata/redpanda:v25.2.11
- redpandadata/redpanda:v26.1.1

cmds:
- for: {var: IMAGES}
cmd: docker inspect {{.ITEM}} > /dev/null || docker pull {{.ITEM}}
- |
pids=""
{{range .IMAGES}}
(docker inspect "{{.}}" > /dev/null 2>&1 || docker pull -q "{{.}}") &
pids="$pids $!"
{{end}}
for pid in $pids; do wait "$pid" || true; done
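For reference, the Go-template loop above renders to one backgrounded pull per image; shown here expanded for the first two images in the list:

    pids=""
    (docker inspect "rancher/k3s:v1.32.13-k3s1" > /dev/null 2>&1 || docker pull -q "rancher/k3s:v1.32.13-k3s1") &
    pids="$pids $!"
    (docker inspect "ghcr.io/k3d-io/k3d-tools:5.8.3" > /dev/null 2>&1 || docker pull -q "ghcr.io/k3d-io/k3d-tools:5.8.3") &
    pids="$pids $!"
    # Wait on every pull; "|| true" keeps a single failed pull from failing the task.
    for pid in $pids; do wait "$pid" || true; done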

pending-prs:
desc: "Get all pending PRs for watched branches"
31 changes: 1 addition & 30 deletions acceptance/features/cluster.feature
@@ -3,7 +3,7 @@
Feature: Basic cluster tests
@skip:gke @skip:aks @skip:eks
Scenario: Updating admin ports
# replaces e2e-v2 "upgrade-values-check"
# replaces e2e-v2 "upgrade-values-check"
Given I apply Kubernetes manifest:
"""
---
@@ -44,32 +44,3 @@ Feature: Basic cluster tests
Then cluster "upgrade" is stable with 1 nodes
And service "upgrade-external" should have named port "admin-default" with value 9640
And rpk is configured correctly in "upgrade" cluster


@skip:gke @skip:aks @skip:eks
Scenario: Rack Awareness
Given I apply Kubernetes manifest:
# NB: You wouldn't actually use kubernetes.io/os for the value of rack,
# it's just a value that we know is both present and deterministic for the
# purpose of testing.
"""
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Redpanda
metadata:
name: rack-awareness
spec:
clusterSpec:
console:
enabled: false
statefulset:
replicas: 1
rackAwareness:
enabled: true
nodeAnnotation: 'kubernetes.io/os'
"""
And cluster "rack-awareness" is stable with 1 nodes
Then running `cat /etc/redpanda/redpanda.yaml | grep -o 'rack: .*$'` will output:
"""
rack: linux
"""
10 changes: 9 additions & 1 deletion acceptance/features/console-upgrades.feature
@@ -1,9 +1,13 @@
@operator:none
@vcluster
Feature: Upgrading the operator with Console installed
@skip:gke @skip:aks @skip:eks
Scenario: Console v2 to v3 no warnings
Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values:
"""
image:
repository: redpandadata/redpanda-operator
crds:
enabled: true
"""
And I apply Kubernetes manifest:
"""
@@ -47,6 +51,10 @@ Feature: Upgrading the operator with Console installed
Scenario: Console v2 to v3 with warnings
Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values:
"""
image:
repository: redpandadata/redpanda-operator
crds:
enabled: true
"""
And I apply Kubernetes manifest:
"""
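The values blocks added to the helm-install steps in this file (and in the other upgrade features below) pin the operator image repository and enable CRD installation. Outside the Gherkin harness, the equivalent install is roughly the following, assuming the chart comes from Redpanda's public Helm repository:

    # Assumed repo URL; the scenarios only reference the "redpanda/operator" chart.
    helm repo add redpanda https://charts.redpanda.com
    helm install redpanda-operator redpanda/operator --version v25.1.3 \
      --set image.repository=redpandadata/redpanda-operator \
      --set crds.enabled=true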
3 changes: 2 additions & 1 deletion acceptance/features/decommissioning.feature
@@ -1,6 +1,7 @@
@serial
Feature: Decommissioning brokers
# note that this test requires both the decommissioner and pvc unbinder
# run in order to pass
# run in order to pass
@skip:gke @skip:aks @skip:eks
Scenario: Pruning brokers on failed nodes
Given I create a basic cluster "decommissioning" with 3 nodes
2 changes: 1 addition & 1 deletion acceptance/features/helm-chart.feature
@@ -1,4 +1,4 @@
@operator:none
@serial
Feature: Redpanda Helm Chart

Scenario: Tolerating Node Failure
2 changes: 1 addition & 1 deletion acceptance/features/multicluster.feature
@@ -1,4 +1,4 @@
@operator:none
@multicluster
Feature: Multicluster Operator

@skip:gke @skip:aks @skip:eks @skip:k3d
4 changes: 3 additions & 1 deletion acceptance/features/operator-upgrades.feature
@@ -1,9 +1,11 @@
@operator:none @vcluster
@vcluster
Feature: Upgrading the operator
@skip:gke @skip:aks @skip:eks
Scenario: Operator upgrade from 25.2.2
Given I helm install "redpanda-operator" "redpanda/operator" --version v25.2.2 with values:
"""
image:
repository: redpandadata/redpanda-operator
crds:
enabled: true
"""
28 changes: 28 additions & 0 deletions acceptance/features/rack-awareness.feature
@@ -0,0 +1,28 @@
Feature: Rack Awareness
@skip:gke @skip:aks @skip:eks
Scenario: Rack Awareness
Given I apply Kubernetes manifest:
# NB: You wouldn't actually use kubernetes.io/os for the value of rack,
# it's just a value that we know is both present and deterministic for the
# purpose of testing.
"""
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Redpanda
metadata:
name: rack-awareness
spec:
clusterSpec:
console:
enabled: false
statefulset:
replicas: 1
rackAwareness:
enabled: true
nodeAnnotation: 'kubernetes.io/os'
"""
And cluster "rack-awareness" is stable with 1 nodes
Then running `cat /etc/redpanda/redpanda.yaml | grep -o 'rack: .*$'` will output:
"""
rack: linux
"""
@@ -5,10 +5,3 @@ Feature: Scaling down broker nodes
And cluster "scaledown" is stable with 5 nodes
When I scale "scaledown" to 3 nodes
Then cluster "scaledown" should be stable with 3 nodes

@skip:gke @skip:aks @skip:eks
Scenario: Scaling up nodes
Given I create a basic cluster "scaleup" with 1 nodes
And cluster "scaleup" is stable with 1 nodes
When I scale "scaleup" to 3 nodes
Then cluster "scaleup" should be stable with 3 nodes
7 changes: 7 additions & 0 deletions acceptance/features/scale-up.feature
@@ -0,0 +1,7 @@
Feature: Scaling up broker nodes
@skip:gke @skip:aks @skip:eks
Scenario: Scaling up nodes
Given I create a basic cluster "scaleup" with 1 nodes
And cluster "scaleup" is stable with 1 nodes
When I scale "scaleup" to 3 nodes
Then cluster "scaleup" should be stable with 3 nodes
2 changes: 1 addition & 1 deletion acceptance/features/shadow-links.feature
@@ -6,7 +6,7 @@ Feature: ShadowLink CRDs
And I enable feature "enable_shadow_linking" on cluster "basic"
And I enable feature "enable_shadow_linking" on cluster "sasl"
# enable trace logging on the target cluster so we can debug a bit easier
And I enable "trace" logging for the "cluster_link" logger on cluster "sasl"
And I enable "trace" logging for the "shadow_link_service" logger on cluster "sasl"

@skip:gke @skip:aks @skip:eks
Scenario: Manage ShadowLink
6 changes: 5 additions & 1 deletion acceptance/features/upgrade-regressions.feature
@@ -1,11 +1,13 @@
@operator:none @vcluster
@vcluster
# Note: use the same version of RP across upgrades to minimize
# issues not related to operator upgrade regressions.
Feature: Operator upgrade regressions
@skip:gke @skip:aks @skip:eks
Scenario: Regression - field managers
Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values:
"""
image:
repository: redpandadata/redpanda-operator
crds:
enabled: true
"""
@@ -41,6 +43,8 @@ Feature: Operator upgrade regressions
"""
Then I helm upgrade "redpanda-operator" "redpanda/operator" --version v25.3.1 with values:
"""
image:
repository: redpandadata/redpanda-operator
crds:
enabled: true
"""