diff --git a/.pipelines/build/scripts/cni.sh b/.pipelines/build/scripts/cni.sh
index bbdddbaa68..a6575aee5c 100644
--- a/.pipelines/build/scripts/cni.sh
+++ b/.pipelines/build/scripts/cni.sh
@@ -61,5 +61,6 @@ pushd "$REPO_ROOT"/cni
   cp azure-$OS-swift-overlay.conflist "$OUT_DIR"/files/azure-swift-overlay.conflist
   cp azure-$OS-swift-overlay-dualstack.conflist "$OUT_DIR"/files/azure-swift-overlay-dualstack.conflist
   cp azure-$OS-multitenancy.conflist "$OUT_DIR"/files/multitenancy.conflist
+  cp azure-chained-cilium.conflist "$OUT_DIR"/files/azure-chained-cilium.conflist
   cp "$REPO_ROOT"/telemetry/azure-vnet-telemetry.config "$OUT_DIR"/files/azure-vnet-telemetry.config
 popd
diff --git a/cni/Dockerfile b/cni/Dockerfile
index 5867fd09b2..c212e461a0 100644
--- a/cni/Dockerfile
+++ b/cni/Dockerfile
@@ -33,6 +33,7 @@ COPY --from=azure-vnet /azure-container-networking/cni/azure-linux-multitenancy-
 COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay.conflist /payload/azure-swift-overlay.conflist
 COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay-dualstack.conflist /payload/azure-swift-overlay-dualstack.conflist
 COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-multitenancy.conflist /payload/azure-multitenancy.conflist
+COPY --from=azure-vnet /azure-container-networking/cni/azure-chained-cilium.conflist /payload/azure-chained-cilium.conflist
 COPY --from=azure-vnet /azure-container-networking/telemetry/azure-vnet-telemetry.config /payload/azure-vnet-telemetry.config
 RUN cd /payload && sha256sum * > sum.txt
 RUN gzip --verbose --best --recursive /payload && for f in /payload/*.gz; do mv -- "$f" "${f%%.gz}"; done
diff --git a/cni/azure-chained-cilium.conflist b/cni/azure-chained-cilium.conflist
new file mode 100644
index 0000000000..e629a7c0f2
--- /dev/null
+++ b/cni/azure-chained-cilium.conflist
@@ -0,0 +1,21 @@
+{
+  "cniVersion": "0.3.0",
+  "name": "azure",
+  "plugins": [
+    {
+      "type": "azure-vnet",
+      "mode": "transparent",
+      "ipsToRouteViaHost": [
+        "169.254.20.10"
+      ],
+      "executionMode": "v4swift",
+      "ipam": {
+        "type": "azure-cns"
+      }
+    },
+    {
+      "name": "cilium",
+      "type": "cilium-cni"
+    }
+  ]
+}
diff --git a/cns/cniconflist/generator.go b/cns/cniconflist/generator.go
index 1e154dd80b..9ff5563333 100644
--- a/cns/cniconflist/generator.go
+++ b/cns/cniconflist/generator.go
@@ -71,6 +71,10 @@ type SWIFTGenerator struct {
 	Writer io.WriteCloser
 }
 
+type AzureCNIChainedCiliumGenerator struct {
+	Writer io.WriteCloser
+}
+
 func (v *V4OverlayGenerator) Close() error {
 	if err := v.Writer.Close(); err != nil {
 		return errors.Wrap(err, "error closing generator")
 	}
 
 	return nil
 }
@@ -110,3 +114,11 @@ func (v *SWIFTGenerator) Close() error {
 
 	return nil
 }
+
+func (v *AzureCNIChainedCiliumGenerator) Close() error {
+	if err := v.Writer.Close(); err != nil {
+		return errors.Wrap(err, "error closing generator")
+	}
+
+	return nil
+}
diff --git a/cns/cniconflist/generator_linux.go b/cns/cniconflist/generator_linux.go
index 1c65cdbff6..01886313a8 100644
--- a/cns/cniconflist/generator_linux.go
+++ b/cns/cniconflist/generator_linux.go
@@ -161,3 +161,33 @@ func (v *SWIFTGenerator) Generate() error {
 
 	return nil
 }
+
+func (v *AzureCNIChainedCiliumGenerator) Generate() error {
+	conflist := cniConflist{
+		CNIVersion: azurecniVersion,
+		Name:       azureName,
+		Plugins: []any{
+			cni.NetworkConfig{
+				Type:              azureType,
+				Mode:              cninet.OpModeTransparent,
+				IPsToRouteViaHost: []string{nodeLocalDNSIP},
+				ExecutionMode:     string(util.V4Swift),
+				IPAM: cni.IPAM{
+					Type: network.AzureCNS,
+				},
+			},
+			cni.NetworkConfig{
+				Name: ciliumcniName,
+				Type: ciliumcniType,
+			},
+		},
+	}
+
+	enc := json.NewEncoder(v.Writer)
+	enc.SetIndent("", "\t")
+	if err := enc.Encode(conflist); err != nil {
+		return errors.Wrap(err, "error encoding conflist to json")
+	}
+
+	return nil
+}
diff --git a/cns/cniconflist/generator_linux_test.go b/cns/cniconflist/generator_linux_test.go
index 982cabceed..79e6b7db37 100644
--- a/cns/cniconflist/generator_linux_test.go
+++ b/cns/cniconflist/generator_linux_test.go
@@ -6,7 +6,7 @@ import (
 	"testing"
 
 	"github.com/Azure/azure-container-networking/cns/cniconflist"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 type bufferWriteCloser struct {
@@ -23,13 +23,13 @@ func TestGenerateV4OverlayConflist(t *testing.T) {
 	buffer := new(bytes.Buffer)
 	g := cniconflist.V4OverlayGenerator{Writer: &bufferWriteCloser{buffer}}
 	err := g.Generate()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	fixtureBytes, err := os.ReadFile(fixture)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// remove newlines and carriage returns in case these UTs are running on Windows
-	assert.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
 }
 
 func TestGenerateDualStackOverlayConflist(t *testing.T) {
@@ -38,13 +38,13 @@
 	buffer := new(bytes.Buffer)
 	g := cniconflist.DualStackOverlayGenerator{Writer: &bufferWriteCloser{buffer}}
 	err := g.Generate()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	fixtureBytes, err := os.ReadFile(fixture)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// remove newlines and carriage returns in case these UTs are running on Windows
-	assert.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
 }
 
 func TestGenerateOverlayConflist(t *testing.T) {
@@ -53,13 +53,13 @@
 	buffer := new(bytes.Buffer)
 	g := cniconflist.OverlayGenerator{Writer: &bufferWriteCloser{buffer}}
 	err := g.Generate()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	fixtureBytes, err := os.ReadFile(fixture)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// remove newlines and carriage returns in case these UTs are running on Windows
-	assert.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
 }
 
 func TestGenerateCiliumConflist(t *testing.T) {
@@ -68,13 +68,13 @@
 	buffer := new(bytes.Buffer)
 	g := cniconflist.CiliumGenerator{Writer: &bufferWriteCloser{buffer}}
 	err := g.Generate()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	fixtureBytes, err := os.ReadFile(fixture)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// remove newlines and carriage returns in case these UTs are running on Windows
-	assert.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
 }
 
 func TestGenerateSWIFTConflist(t *testing.T) {
@@ -83,13 +83,28 @@
 	buffer := new(bytes.Buffer)
 	g := cniconflist.SWIFTGenerator{Writer: &bufferWriteCloser{buffer}}
 	err := g.Generate()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	fixtureBytes, err := os.ReadFile(fixture)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// remove newlines and carriage returns in case these UTs are running on Windows
-	assert.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
+}
+
+func TestGenerateAzurecniCiliumConflist(t *testing.T) {
+	fixture := "testdata/fixtures/azure-chained-cilium.conflist"
+
+	buffer := new(bytes.Buffer)
+	g := cniconflist.AzureCNIChainedCiliumGenerator{Writer: &bufferWriteCloser{buffer}}
+	err := g.Generate()
+	require.NoError(t, err)
+
+	fixtureBytes, err := os.ReadFile(fixture)
+	require.NoError(t, err)
+
+	// remove newlines and carriage returns in case these UTs are running on Windows
+	require.Equal(t, removeNewLines(fixtureBytes), removeNewLines(buffer.Bytes()))
 }
 
 // removeNewLines will remove the newlines and carriage returns from the byte slice
diff --git a/cns/cniconflist/generator_windows.go b/cns/cniconflist/generator_windows.go
index 31551f4e59..d0ef208578 100644
--- a/cns/cniconflist/generator_windows.go
+++ b/cns/cniconflist/generator_windows.go
@@ -25,3 +25,7 @@ func (v *CiliumGenerator) Generate() error {
 	return errNotImplemented
 }
 
 func (v *SWIFTGenerator) Generate() error {
 	return errNotImplemented
 }
+
+func (v *AzureCNIChainedCiliumGenerator) Generate() error {
+	return errNotImplemented
+}
diff --git a/cns/cniconflist/testdata/fixtures/azure-chained-cilium.conflist b/cns/cniconflist/testdata/fixtures/azure-chained-cilium.conflist
new file mode 100644
index 0000000000..7fc6fca211
--- /dev/null
+++ b/cns/cniconflist/testdata/fixtures/azure-chained-cilium.conflist
@@ -0,0 +1,34 @@
+{
+	"cniVersion": "0.3.0",
+	"name": "azure",
+	"plugins": [
+		{
+			"type": "azure-vnet",
+			"mode": "transparent",
+			"ipsToRouteViaHost": [
+				"169.254.20.10"
+			],
+			"executionMode": "v4swift",
+			"ipam": {
+				"type": "azure-cns"
+			},
+			"dns": {},
+			"runtimeConfig": {
+				"dns": {}
+			},
+			"windowsSettings": {}
+		},
+		{
+			"name": "cilium",
+			"type": "cilium-cni",
+			"ipam": {
+				"type": ""
+			},
+			"dns": {},
+			"runtimeConfig": {
+				"dns": {}
+			},
+			"windowsSettings": {}
+		}
+	]
+}
diff --git a/cns/service/main.go b/cns/service/main.go
index 67f7872f44..24263320d3 100644
--- a/cns/service/main.go
+++ b/cns/service/main.go
@@ -121,11 +121,12 @@ const (
 type cniConflistScenario string
 
 const (
-	scenarioV4Overlay        cniConflistScenario = "v4overlay"
-	scenarioDualStackOverlay cniConflistScenario = "dualStackOverlay"
-	scenarioOverlay          cniConflistScenario = "overlay"
-	scenarioCilium           cniConflistScenario = "cilium"
-	scenarioSWIFT            cniConflistScenario = "swift"
+	scenarioV4Overlay             cniConflistScenario = "v4overlay"
+	scenarioDualStackOverlay      cniConflistScenario = "dualStackOverlay"
+	scenarioOverlay               cniConflistScenario = "overlay"
+	scenarioCilium                cniConflistScenario = "cilium"
+	scenarioSWIFT                 cniConflistScenario = "swift"
+	scenarioAzurecniChainedCilium cniConflistScenario = "azurecni-chained-cilium"
 )
 
 var (
@@ -623,6 +624,8 @@ func main() {
 		conflistGenerator = &cniconflist.CiliumGenerator{Writer: writer}
 	case scenarioSWIFT:
 		conflistGenerator = &cniconflist.SWIFTGenerator{Writer: writer}
+	case scenarioAzurecniChainedCilium:
+		conflistGenerator = &cniconflist.AzureCNIChainedCiliumGenerator{Writer: writer}
 	default:
 		logger.Errorf("unable to generate cni conflist for unknown scenario: %s", scenario)
 		os.Exit(1)
diff --git a/docs/feature/swift-v2/setup-guide-azcni.md b/docs/feature/swift-v2/setup-guide-azcni.md
new file mode 100644
index 0000000000..596aaffd3b
--- /dev/null
+++ b/docs/feature/swift-v2/setup-guide-azcni.md
@@ -0,0 +1,47 @@
+# Swiftv2 Cilium In-place Upgrade Guide
+
+## Steps
+### Clone repo + checkout branch for *.yamls
+```
+git clone https://github.com/Azure/azure-container-networking.git
+cd azure-container-networking
+git checkout master
+```
+
+### Update Conflist
+
+```
+export CONFLIST=azure-chained-cilium.conflist
+export CONFLIST_PRIORITY=05
+export CNI_IMAGE=acnpublic.azurecr.io/public/containernetworking/azure-cni:v1.7.5-3
+envsubst '${CONFLIST},${CONFLIST_PRIORITY},${CNI_IMAGE}' < test/integration/manifests/cni/conflist-installer.yaml | kubectl apply -f -
+```
+
+
+### Apply Cilium config
+```
+export DIR=1.17
+export CILIUM_VERSION_TAG=v1.17.7-250927
+export CILIUM_IMAGE_REGISTRY=mcr.microsoft.com/containernetworking
+kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-chained-config.yaml
+```
+
+
+### Apply Cilium Agent + Operator + RBAC
+```
+kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files
+kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files
+envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f -
+envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f -
+```
+
+
+### Quick Summary
+- Apply conflist installer to update conflist on all nodes
+- Apply Cilium Config
+- Apply Agent + Operator + RBAC
+
+
+## Quick Validation Testing
+- Check Cilium management with
+  - `kubectl get cep -A`
diff --git a/docs/feature/swift-v2/setup-guide-cil.md b/docs/feature/swift-v2/setup-guide-cil.md
new file mode 100644
index 0000000000..64c2d3526e
--- /dev/null
+++ b/docs/feature/swift-v2/setup-guide-cil.md
@@ -0,0 +1,47 @@
+# Swiftv2 Managed Cilium Setup Guide
+
+## Steps
+### Clone repo + checkout branch for *.yamls
+```
+git clone https://github.com/Azure/azure-container-networking.git
+cd azure-container-networking
+git checkout master
+```
+
+### Update Conflist
+
+```
+export CONFLIST=azure-chained-cilium.conflist
+export CONFLIST_PRIORITY=05
+export CNI_IMAGE=acnpublic.azurecr.io/public/containernetworking/azure-cni:v1.7.5-3
+envsubst '${CONFLIST},${CONFLIST_PRIORITY},${CNI_IMAGE}' < test/integration/manifests/cni/conflist-installer-byon.yaml | kubectl apply -f -
+```
+
+
+### Apply Watcher
+```
+kubectl apply -f test/integration/manifests/cilium/watcher/deployment.yaml
+```
+
+- Watcher obtains the existing Cilium RBAC and DaemonSet from a managed node
+  - We overwrite Cilium ConfigMap values through args on the `cilium-agent` container within the watcher deployment, as sketched below.
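+- For illustration, the effective override has roughly this shape (a hypothetical excerpt; `--cni-chaining-mode=generic-veth` is the flag the watcher script in this change appends):
+
+```
+# Sketch: the watcher appends args to the copied cilium-agent container
+# rather than editing the managed ConfigMap in place.
+containers:
+  - name: cilium-agent
+    args:
+      - --cni-chaining-mode=generic-veth
+```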
+
+
+
+### Quick Summary
+- Apply conflist installer to update the conflist on BYON nodes
+- Apply Watcher and override existing ConfigMap values through the `cilium-agent` container
+
+## Quick Validation Testing
+Check Cilium management with
+- `kubectl get cep -A`
diff --git a/hack/manifests/kubectl.yaml b/hack/manifests/kubectl.yaml
index 0b241ca168..7a54474d5a 100644
--- a/hack/manifests/kubectl.yaml
+++ b/hack/manifests/kubectl.yaml
@@ -6,7 +6,7 @@ metadata:
 spec:
   containers:
   - name: kubectl
-    image: docker.io/bitnami/kubectl:latest
+    image: mcr.microsoft.com/oss/v2/kubernetes/kubectl
     command: ["/bin/bash", "-c", "--"]
     args: ["sleep 3600"]
     env:
diff --git a/test/integration/manifests/cilium/netpol/default-allow.yaml b/test/integration/manifests/cilium/netpol/default-allow.yaml
new file mode 100644
index 0000000000..e30f937210
--- /dev/null
+++ b/test/integration/manifests/cilium/netpol/default-allow.yaml
@@ -0,0 +1,15 @@
+## Only allows traffic within the default namespace
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: allow-default
+spec:
+  endpointSelector: {}
+  ingress:
+  - fromEndpoints:
+    - matchLabels:
+        k8s:io.kubernetes.pod.namespace: default
+  egress:
+  - toEndpoints:
+    - matchLabels:
+        k8s:io.kubernetes.pod.namespace: default
diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml
index f3e6e7093f..c2a2d91925 100644
--- a/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml
+++ b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml
@@ -29,8 +29,6 @@ spec:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
-            - key: kubernetes.azure.com/cluster
-              operator: Exists
             - key: type
               operator: NotIn
               values:
@@ -418,7 +416,7 @@ spec:
           path: /proc/sys/kernel
           type: Directory
         name: host-proc-sys-kernel
-      - hostPath: 
+      - hostPath:
           path: /var/run/netns
           type: DirectoryOrCreate
         name: cilium-netns
diff --git a/test/integration/manifests/cilium/v1.17/cilium-config/cilium-chained-config.yaml b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-chained-config.yaml
new file mode 100644
index 0000000000..6d1981934d
--- /dev/null
+++ b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-chained-config.yaml
@@ -0,0 +1,140 @@
+apiVersion: v1 #Not verified, placeholder
+data:
+  agent-not-ready-taint-key: node.cilium.io/agent-not-ready
+  arping-refresh-period: 30s
+  auto-direct-node-routes: "false"
+  bpf-lb-external-clusterip: "false"
+  bpf-lb-map-max: "65536"
+  bpf-lb-mode: snat
+  bpf-map-dynamic-size-ratio: "0.0025"
+  bpf-policy-map-max: "16384"
+  bpf-root: /sys/fs/bpf
+  cgroup-root: /run/cilium/cgroupv2
+  cilium-endpoint-gc-interval: 5m0s
+  cni-chaining-mode: generic-veth
+  cluster-id: "0"
+  cluster-name: default
+  debug: "false"
+  disable-cnp-status-updates: "true"
+  disable-endpoint-crd: "false"
+  enable-auto-protect-node-port-range: "true"
+  enable-bgp-control-plane: "false"
+  enable-bpf-clock-probe: "true"
+  enable-endpoint-health-checking: "false"
+  enable-endpoint-routes: "true"
+  enable-health-check-nodeport: "true"
+  enable-health-checking: "true"
+  enable-host-legacy-routing: "true"
+  enable-hubble: "false"
+  enable-ipv4: "true"
+  enable-ipv4-masquerade: "false"
+  enable-ipv6: "false"
+  enable-ipv6-masquerade: "false"
+  enable-k8s-terminating-endpoint: "true"
+  enable-l2-neigh-discovery: "true"
+  enable-l7-proxy: "false"
+  enable-local-node-route: "false"
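+  # Note (assumed rationale): with cni-chaining-mode=generic-veth and ipam=delegated-plugin
+  # below, pod routing is owned by the chained azure-vnet plugin, so Cilium's local node route stays off.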
+  enable-local-redirect-policy: "true" # set to true for lrp test
+  enable-metrics: "true"
+  enable-policy: default
+  enable-session-affinity: "true"
+  enable-svc-source-range-check: "true"
+  enable-vtep: "false"
+  enable-well-known-identities: "false"
+  enable-xt-socket-fallback: "true"
+  identity-allocation-mode: crd
+  install-iptables-rules: "true"
+  install-no-conntrack-iptables-rules: "false"
+  ipam: delegated-plugin
+  kube-proxy-replacement: "true"
+  local-router-ipv4: 169.254.23.0
+  metrics: +cilium_bpf_map_pressure
+  monitor-aggregation: medium
+  monitor-aggregation-flags: all
+  monitor-aggregation-interval: 5s
+  node-port-bind-protection: "true"
+  nodes-gc-interval: 5m0s
+  operator-api-serve-addr: 127.0.0.1:9234
+  operator-prometheus-serve-addr: :9963
+  preallocate-bpf-maps: "false"
+  procfs: /host/proc
+  prometheus-serve-addr: :9962
+  remove-cilium-node-taints: "true"
+  set-cilium-is-up-condition: "true"
+  sidecar-istio-proxy-image: cilium/istio_proxy
+  synchronize-k8s-nodes: "true"
+  tofqdns-dns-reject-response-code: refused
+  tofqdns-enable-dns-compression: "true"
+  tofqdns-endpoint-max-ip-per-hostname: "1000"
+  tofqdns-idle-connection-grace-period: 0s
+  tofqdns-max-deferred-connection-deletes: "10000"
+  tofqdns-min-ttl: "0"
+  tofqdns-proxy-response-max-delay: 100ms
+  routing-mode: native
+  unmanaged-pod-watcher-interval: "15"
+  vtep-cidr: ""
+  vtep-endpoint: ""
+  vtep-mac: ""
+  vtep-mask: ""
+  enable-sctp: "false"
+  external-envoy-proxy: "false"
+  k8s-client-qps: "10"
+  k8s-client-burst: "20"
+  mesh-auth-enabled: "true"
+  mesh-auth-queue-size: "1024"
+  mesh-auth-rotated-identities-queue-size: "1024"
+  mesh-auth-gc-interval: "5m0s"
+  proxy-connect-timeout: "2"
+  proxy-max-requests-per-connection: "0"
+  proxy-max-connection-duration-seconds: "0"
+  set-cilium-node-taints: "true"
+## new values added for 1.16 below
+  enable-ipv4-big-tcp: "false"
+  enable-ipv6-big-tcp: "false"
+  enable-masquerade-to-route-source: "false"
+  enable-health-check-loadbalancer-ip: "false"
+  bpf-lb-acceleration: "disabled"
+  enable-k8s-networkpolicy: "true"
+  cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down
+  cni-log-file: "/var/run/cilium/cilium-cni.log"
+  ipam-cilium-node-update-rate: "15s"
+  egress-gateway-reconciliation-trigger-interval: "1s"
+  nat-map-stats-entries: "32"
+  nat-map-stats-interval: "30s"
+  bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble
+  bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble
+  bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble
+  enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel
+  datapath-mode: "veth"
+  direct-routing-skip-unreachable: "false"
+  enable-runtime-device-detection: "false"
+  bpf-lb-sock: "false"
+  bpf-lb-sock-terminate-pod-connections: "false"
+  nodeport-addresses: ""
+  k8s-require-ipv4-pod-cidr: "false"
+  k8s-require-ipv6-pod-cidr: "false"
+  enable-node-selector-labels: "false"
+## new values for 1.17
+  ces-slice-mode: "fcfs"
+  enable-cilium-endpoint-slice: "true"
+  bpf-lb-source-range-all-types: "false"
+  bpf-algorithm-annotation: "false"
+  bpf-lb-mode-annotation: "false"
+  enable-experimental-lb: "false"
+  enable-endpoint-lockdown-on-policy-overflow: "false"
+  health-check-icmp-failure-threshold: "3"
+  enable-internal-traffic-policy: "true"
+  enable-lb-ipam: "true"
+  enable-non-default-deny-policies: "true"
"true" + enable-source-ip-verification: "true" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml index 0b1a497bd2..154b83f002 100644 --- a/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml +++ b/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml @@ -123,8 +123,6 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: kubernetes.azure.com/cluster - operator: Exists - key: type operator: NotIn values: diff --git a/test/integration/manifests/cilium/watcher/deployment.yaml b/test/integration/manifests/cilium/watcher/deployment.yaml new file mode 100644 index 0000000000..1cf59e8a87 --- /dev/null +++ b/test/integration/manifests/cilium/watcher/deployment.yaml @@ -0,0 +1,372 @@ + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-watcher-sa + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cilium-watcher-role + namespace: kube-system +rules: + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "create", "update", "patch", "apply"] + - apiGroups: ["apps"] + resources: ["daemonsets"] + verbs: ["get", "list", "create", "watch", "patch", "apply"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cilium-watcher-binding + namespace: kube-system +subjects: + - kind: ServiceAccount + name: cilium-watcher-sa +roleRef: + kind: Role + name: cilium-watcher-role + apiGroup: rbac.authorization.k8s.io +--- +### For ClusterRole and ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-watcher-cluster-role +rules: + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "create", "update", "patch", "watch"] + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get + - apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create + - apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get + - apiGroups: + - 
+      - cilium.io
+    resources:
+      - ciliumnodes
+      - ciliumnodes/status
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - cilium.io
+    resources:
+      - ciliumnetworkpolicies/status
+      - ciliumclusterwidenetworkpolicies/status
+      - ciliumendpoints/status
+      - ciliumendpoints
+      - ciliuml2announcementpolicies/status
+      - ciliumbgpnodeconfigs/status
+    verbs:
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+  - apiGroups:
+      - cilium.io
+    resources:
+      - ciliumnodes
+    verbs:
+      - patch
+
+---
+### For ClusterRole and ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium-watcher-cluster-binding
+subjects:
+  - kind: ServiceAccount
+    name: cilium-watcher-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: cilium-watcher-cluster-role
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cilium-watcher
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: cilium-watcher
+  template:
+    metadata:
+      labels:
+        app: cilium-watcher
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: kubernetes.azure.com/managed
+                    # set on unmanaged nodes
+                    operator: NotIn
+                    values:
+                      - "false"
+      hostNetwork: true
+      serviceAccountName: cilium-watcher-sa
+      containers:
+        - name: ds-watcher
+          image: mcr.microsoft.com/oss/v2/kubernetes/kubectl
+          command:
+            - /bin/sh
+            - -c
+            - |
+
+              NAMESPACE="kube-system"
+              export ORIGINAL_DAEMONSET="cilium"
+              export MODIFIED_DAEMONSET="cilium-unmanaged"
+
+              update_daemonset() {
+                local temp_file="/tmp/${ORIGINAL_DAEMONSET}_daemonset.yaml"
+
+                echo "Fetching the latest Cilium DaemonSet YAML..."
+                kubectl get daemonset $ORIGINAL_DAEMONSET -n $NAMESPACE -oyaml > "$temp_file"
+
+                echo "Modifying the YAML..."
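+                # The yq edits below clone the managed DaemonSet into an unmanaged
+                # copy: rename it, relabel its selector, retarget node affinity at
+                # kubernetes.azure.com/managed=false nodes, and strip server-set fields.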
+                # Change the name of the DaemonSet
+                yq eval '.metadata.name = strenv(MODIFIED_DAEMONSET)' -i "$temp_file"
+
+                yq eval 'del(.metadata.annotations)' -i "$temp_file"
+                yq eval 'del(.metadata.labels)' -i "$temp_file"
+
+                # Remove metadata.annotations and metadata.labels.app.kubernetes.io/managed-by
+                yq eval 'del(.spec.template.metadata.annotations)' -i "$temp_file"
+                yq eval 'del(.spec.template.metadata.labels."kubernetes.azure.com/managedby")' -i "$temp_file"
+                yq eval '.spec.template.metadata.labels."k8s-app" = strenv(MODIFIED_DAEMONSET)' -i "$temp_file"
+                yq eval '.spec.selector.matchLabels."k8s-app" = strenv(MODIFIED_DAEMONSET)' -i "$temp_file"
+
+                # yq eval '.spec.template.spec.containers[0].args += ["--enable-bandwidth-manager"]' -i "$temp_file"
+                yq eval '.spec.template.spec.containers[0].args += ["--cni-chaining-mode=generic-veth"]' -i "$temp_file"
+                # yq eval '.spec.template.spec.containers[0].args += ["--enable-host-legacy-routing=false"]' -i "$temp_file"
+
+                # Replace service account name
+                yq eval '.spec.template.spec.serviceAccountName = strenv(MODIFIED_DAEMONSET)' -i "$temp_file"
+                yq eval '.spec.template.spec.serviceAccount = strenv(MODIFIED_DAEMONSET)' -i "$temp_file"
+
+                # Replace node affinity requirement
+                yq eval 'del(.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[].matchExpressions[] | select(.key == "kubernetes.azure.com/ebpf-dataplane"))' -i "$temp_file"
+                yq eval 'with(.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[]; .matchExpressions |= map(select(.key != "kubernetes.azure.com/cluster")) | .matchExpressions += {"key": "kubernetes.azure.com/managed", "operator": "In", "values": ["false"]})' -i "$temp_file"
+
+                # Remove status section
+                yq eval 'del(.status)' -i "$temp_file"
+
+                # Ensure no duplicate conflicts
+                yq eval 'del(.metadata.resourceVersion)' -i "$temp_file"
+                yq eval 'del(.metadata.uid)' -i "$temp_file"
+
+                echo "Applying the modified DaemonSet..."
+                cat "$temp_file"
+                kubectl apply -f "$temp_file"
+                echo "Cilium Unmanaged DaemonSet updated!"
+              }
+
+              kubectl get daemonset $ORIGINAL_DAEMONSET -n $NAMESPACE -w | while read -r line; do
+                if echo "$line" | grep -q "$ORIGINAL_DAEMONSET"; then
+                  echo "Detected change in $ORIGINAL_DAEMONSET, updating..."
+                  update_daemonset
+                fi
+              done
+
+        - name: rbac-watcher
+          image: mcr.microsoft.com/oss/v2/kubernetes/kubectl
+          command:
+            - /bin/sh
+            - -c
+            - |
+
+              NAMESPACE="kube-system"
+              export ORIGINAL_DAEMONSET="cilium"
+              export MODIFIED_DAEMONSET="cilium-unmanaged"
+              update_rbac() {
+                local sa_file="/tmp/${ORIGINAL_DAEMONSET}_sa.yaml"
+                local cr_file="/tmp/${ORIGINAL_DAEMONSET}_cr.yaml"
+                local crb_file="/tmp/${ORIGINAL_DAEMONSET}_crb.yaml"
+                local current_file=""
+
+                # --- ClusterRole ---
+                current_file=$cr_file
+                echo "Fetching the latest Cilium ClusterRole YAML..."
+                kubectl get clusterrole $ORIGINAL_DAEMONSET -oyaml > "$current_file"
+
+                echo "Modifying the ClusterRole YAML..."
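+                # Mirror the managed Cilium ClusterRole under the unmanaged name,
+                # dropping Helm ownership labels and server-set metadata so a clean
+                # kubectl apply can own the copy.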
+                # Change name
+                yq eval '.metadata.name = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+
+                # Remove metadata.annotations
+                yq eval 'del(.metadata.annotations)' -i "$current_file"
+                yq eval 'del(.metadata.labels."app.kubernetes.io/managed-by")' -i "$current_file"
+                yq eval 'del(.metadata.labels."app.kubernetes.io/actually-managed-by")' -i "$current_file"
+                yq eval '.metadata.labels."app.kubernetes.io/part-of" = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+
+                # Ensure no duplicate conflicts
+                yq eval 'del(.metadata.resourceVersion)' -i "$current_file"
+                yq eval 'del(.metadata.uid)' -i "$current_file"
+                yq eval 'del(.metadata.creationTimestamp)' -i "$current_file"
+
+                echo "Applying the modified ClusterRole..."
+                cat "$current_file"
+                kubectl apply -f "$current_file"
+                echo "Cilium Unmanaged ClusterRole updated!"
+
+
+                # --- ServiceAccount ---
+                if kubectl get serviceaccount -n $NAMESPACE $MODIFIED_DAEMONSET -oyaml &> /dev/null; then
+                  echo "ServiceAccount $MODIFIED_DAEMONSET already exists. Skipping creation."
+                else
+                  current_file=$sa_file
+                  echo "Fetching the latest Cilium ServiceAccount YAML..."
+                  kubectl get serviceaccount -n $NAMESPACE $ORIGINAL_DAEMONSET -oyaml > "$current_file"
+
+                  echo "Modifying the ServiceAccount YAML..."
+                  # Change name
+                  yq eval '.metadata.name = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+
+                  # Remove metadata.annotations
+                  yq eval 'del(.metadata.annotations)' -i "$current_file"
+                  yq eval 'del(.metadata.labels)' -i "$current_file"
+                  yq eval 'del(.metadata.labels."app.kubernetes.io/actually-managed-by")' -i "$current_file"
+
+                  # Ensure no duplicate conflicts
+                  yq eval 'del(.metadata.resourceVersion)' -i "$current_file"
+                  yq eval 'del(.metadata.uid)' -i "$current_file"
+                  yq eval 'del(.metadata.creationTimestamp)' -i "$current_file"
+
+                  echo "Applying the modified ServiceAccount..."
+                  cat "$current_file"
+                  kubectl apply -f "$current_file"
+                  echo "Cilium Unmanaged ServiceAccount updated!"
+                fi
+
+                # --- ClusterRoleBinding ---
+                if kubectl get clusterrolebinding $MODIFIED_DAEMONSET -oyaml &> /dev/null; then
+                  echo "ClusterRoleBinding $MODIFIED_DAEMONSET already exists. Skipping creation."
+                else
+                  current_file=$crb_file
+                  echo "Fetching the latest Cilium ClusterRoleBinding YAML..."
+                  kubectl get clusterrolebinding -n $NAMESPACE $ORIGINAL_DAEMONSET -oyaml > "$current_file"
+
+                  echo "Modifying the ClusterRoleBinding YAML..."
+                  # Change name
+                  yq eval '.metadata.name = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+                  yq eval '.roleRef.name = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+                  yq eval '.subjects[0].name = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+
+                  # Remove metadata.annotations
+                  yq eval 'del(.metadata.annotations)' -i "$current_file"
+                  yq eval 'del(.metadata.labels."app.kubernetes.io/managed-by")' -i "$current_file"
+                  yq eval 'del(.metadata.labels."app.kubernetes.io/actually-managed-by")' -i "$current_file"
+                  yq eval '.metadata.labels."app.kubernetes.io/part-of" = strenv(MODIFIED_DAEMONSET)' -i "$current_file"
+
+                  # Ensure no duplicate conflicts
+                  yq eval 'del(.metadata.resourceVersion)' -i "$current_file"
+                  yq eval 'del(.metadata.uid)' -i "$current_file"
+                  yq eval 'del(.metadata.creationTimestamp)' -i "$current_file"
+
+                  echo "Applying the modified ClusterRoleBinding..."
+                  cat "$current_file"
+                  kubectl apply -f "$current_file"
+                  echo "Cilium Unmanaged ClusterRoleBinding updated!"
+                fi
+
+
+              }
+
+              # DaemonSet RBAC watcher
+              kubectl get clusterrole $ORIGINAL_DAEMONSET -w | while read -r line; do
+                if echo "$line" | grep -q "$ORIGINAL_DAEMONSET"; then
+                  echo "Detected change in ClusterRole $ORIGINAL_DAEMONSET, updating RBAC..."
+                  update_rbac
+                fi
+              done
diff --git a/test/integration/manifests/cni/conflist-installer-byon.yaml b/test/integration/manifests/cni/conflist-installer-byon.yaml
new file mode 100644
index 0000000000..90c4d96c65
--- /dev/null
+++ b/test/integration/manifests/cni/conflist-installer-byon.yaml
@@ -0,0 +1,67 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: azure-cni-conflist-installer
+  namespace: kube-system
+  labels:
+    app: azure-cni
+spec:
+  selector:
+    matchLabels:
+      k8s-app: azure-cni
+  template:
+    metadata:
+      labels:
+        k8s-app: azure-cni
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: type
+                    operator: NotIn
+                    values:
+                      - virtual-kubelet
+                  - key: kubernetes.io/os
+                    operator: In
+                    values:
+                      - linux
+                  - key: kubernetes.azure.com/managed
+                    operator: In
+                    values:
+                      - "false"
+      priorityClassName: system-node-critical
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - operator: "Exists"
+          effect: NoExecute
+        - operator: "Exists"
+          effect: NoSchedule
+      initContainers:
+        - name: cni-installer
+          image: ${CNI_IMAGE}
+          imagePullPolicy: Always
+          command: ["/dropgz"]
+          args:
+            - deploy
+            - --skip-verify
+            - ${CONFLIST}
+            - -o
+            - /etc/cni/net.d/${CONFLIST_PRIORITY}-${CONFLIST}
+          volumeMounts:
+            - name: cni-conflist
+              mountPath: /etc/cni/net.d
+      containers:
+        - name: pause
+          image: mcr.microsoft.com/oss/kubernetes/pause:3.6
+      hostNetwork: true
+      volumes:
+        - name: cni-conflist
+          hostPath:
+            path: /etc/cni/net.d
+            type: Directory
+
+# acnpublic.azurecr.io/azure-cni:linux-amd64-v1.7.5-3-g93d32acd0
+# envsubst '${CONFLIST},${CONFLIST_PRIORITY},${CNI_IMAGE}' < test/integration/manifests/cni/conflist-installer-byon.yaml | kubectl apply -f -
diff --git a/test/integration/manifests/cni/conflist-installer.yaml b/test/integration/manifests/cni/conflist-installer.yaml
new file mode 100644
index 0000000000..a66c9b7d29
--- /dev/null
+++ b/test/integration/manifests/cni/conflist-installer.yaml
@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: azure-cni-conflist-installer
+  namespace: kube-system
+  labels:
+    app: azure-cni
+spec:
+  selector:
+    matchLabels:
+      k8s-app: azure-cni
+  template:
+    metadata:
+      labels:
+        k8s-app: azure-cni
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: type
+                    operator: NotIn
+                    values:
+                      - virtual-kubelet
+                  - key: kubernetes.io/os
+                    operator: In
+                    values:
+                      - linux
+      priorityClassName: system-node-critical
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - operator: "Exists"
+          effect: NoExecute
+        - operator: "Exists"
+          effect: NoSchedule
+      initContainers:
+        - name: cni-installer
+          image: ${CNI_IMAGE}
+          imagePullPolicy: Always
+          command: ["/dropgz"]
+          args:
+            - deploy
+            - --skip-verify
+            - ${CONFLIST}
+            - -o
+            - /etc/cni/net.d/${CONFLIST_PRIORITY}-${CONFLIST}
+          volumeMounts:
+            - name: cni-conflist
+              mountPath: /etc/cni/net.d
+      containers:
+        - name: pause
+          image: mcr.microsoft.com/oss/kubernetes/pause:3.6
+      hostNetwork: true
+      volumes:
+        - name: cni-conflist
+          hostPath:
+            path: /etc/cni/net.d
+            type: Directory
+
+# acnpublic.azurecr.io/azure-cni:linux-amd64-v1.7.5-3-g93d32acd0
+# envsubst '${CONFLIST},${CONFLIST_PRIORITY},${CNI_IMAGE}' < test/integration/manifests/cni/conflist-installer.yaml | kubectl apply -f -
diff --git a/test/integration/manifests/cnsconfig/azcnichainedciliumconfigmap.yaml b/test/integration/manifests/cnsconfig/azcnichainedciliumconfigmap.yaml
new file mode 100644
index 0000000000..f1cb5e3f78
--- /dev/null
+++ b/test/integration/manifests/cnsconfig/azcnichainedciliumconfigmap.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cns-config
+  namespace: kube-system
+data:
+  cns_config.json: |
+    {
+      "CNIConflistFilepath": "/etc/cni/net.d/05-azure-chained-cilium.conflist",
+      "CNIConflistScenario": "azurecni-chained-cilium",
+      "ChannelMode": "CRD",
+      "EnableAsyncPodDelete": true,
+      "EnableCNIConflistGeneration": true,
+      "EnableIPAMv2": true,
+      "EnableK8sDevicePlugin": true,
+      "EnableLoggerV2": true,
+      "EnableStateMigration": true,
+      "EnableSubnetScarcity": false,
+      "InitializeFromCNI": false,
+      "Logger": {
+        "file": {
+          "filepath": "/var/log/azure-cns/azure-cns.log",
+          "level": "info",
+          "maxBackups": 5,
+          "maxSize": 5
+        }
+      },
+      "ManageEndpointState": true,
+      "ManagedSettings": {
+        "InfrastructureNetworkID": "",
+        "NodeID": "",
+        "NodeSyncIntervalInSeconds": 30,
+        "PrivateEndpoint": ""
+      },
+      "MetricsBindAddress": ":10092",
+      "ProgramSNATIPTables": false,
+      "TelemetrySettings": {
+        "DebugMode": false,
+        "DisableAll": false,
+        "HeartBeatIntervalInMins": 30,
+        "RefreshIntervalInSecs": 15,
+        "SnapshotIntervalInMins": 60,
+        "TelemetryBatchIntervalInSecs": 15,
+        "TelemetryBatchSizeBytes": 16384
+      }
+    }
diff --git a/test/integration/manifests/swiftv2/mt-deploy.yaml b/test/integration/manifests/swiftv2/mt-deploy.yaml
new file mode 100644
index 0000000000..dbe8d690f7
--- /dev/null
+++ b/test/integration/manifests/swiftv2/mt-deploy.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: container
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: container
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: container
+        kubernetes.azure.com/pod-network-instance: pni
+    spec:
+      containers:
+        - name: container
+          image: mcr.microsoft.com/azurelinux/busybox:1.36
+          command:
+            - sh
+            - -c
+            - sleep 3650d
+          imagePullPolicy: Always
+          securityContext:
+            privileged: true
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations:
+        - key: "cri-resource-consume"
+          operator: "Equal"
+          value: "true"
+          effect: "NoSchedule"
+        - key: "cri-resource-consume"
+          operator: "Equal"
+          value: "true"
+          effect: "NoExecute"
+      topologySpreadConstraints:
+        - maxSkew: 1
+          topologyKey: kubernetes.io/hostname # each node (hostname) is its own topology domain
+          whenUnsatisfiable: ScheduleAnyway
+          labelSelector:
+            matchLabels:
+              app: container
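A quick end-to-end check once the CNS ConfigMap and conflist installer above are applied (a sketch: `<node>` is a placeholder, and the conflist path comes from `CNIConflistFilepath` in `cns_config.json`):

```bash
# Confirm CNS generated the chained conflist on the node; `kubectl debug`
# mounts the node's root filesystem at /host inside the debug container.
kubectl debug node/<node> -it --image=mcr.microsoft.com/azurelinux/busybox:1.36 -- \
  cat /host/etc/cni/net.d/05-azure-chained-cilium.conflist

# Confirm Cilium is managing endpoints for newly scheduled pods.
kubectl get cep -A
```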