Installing Cilium on GKE
Overview
GKE Standard clusters do not ship productized Cilium support in their default network mode, but the Cilium open-source community does support container networking on GKE Standard clusters: when Cilium is installed in a GKE environment, the installer automatically applies a number of configuration adjustments to fit the platform.
Installation
Make sure the nodes carry the following taint, so that application pods cannot be scheduled before the Cilium agent is ready:
taints:
- key: "node.cilium.io/agent-not-ready"
value: "true"
effect: "NoExecute"
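On GKE this taint is most conveniently set when the node pool is created. A sketch with gcloud (cluster and pool names are placeholders; zone/region flags omitted):

gcloud container node-pools create pool-1 \
    --cluster my-cluster \
    --node-taints node.cilium.io/agent-not-ready=true:NoExecute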
Then install Cilium with the cilium CLI:
cilium install --version 1.18.2
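Once the install finishes, the same CLI can verify the deployment; note that cilium connectivity test deploys temporary test workloads and takes a few minutes:

cilium status --wait
cilium connectivity test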
YAML Manifests
After installation, the following objects exist in the kube-system namespace; their full manifests are reproduced below:
- cilium (DaemonSet)
- cilium-config (ConfigMap)
- cilium-operator (Deployment)
- cilium-envoy (DaemonSet)
- cilium-envoy-config (ConfigMap)
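The manifests below were dumped from a running cluster; an equivalent snapshot can be taken with:

kubectl -n kube-system get daemonset/cilium configmap/cilium-config \
    deployment/cilium-operator daemonset/cilium-envoy \
    configmap/cilium-envoy-config -o yaml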
apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "1"
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
name: cilium
namespace: kube-system
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: cilium
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: cilium-agent
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBE_CLIENT_BACKOFF_BASE
value: "1"
- name: KUBE_CLIENT_BACKOFF_DURATION
value: "120"
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
- name: require-k8s-connectivity
value: "false"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 300
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/cilium/envoy/sockets
name: envoy-sockets
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
dnsPolicy: ClusterFirst
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: config
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /home/kubernetes/bin
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: mount-cgroup
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /home/kubernetes/bin
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- command:
- sh
- -c
- |
until test -s "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"; do
echo "Waiting on node-init to run...";
sleep 1;
done
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: wait-for-node-init
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium-bootstrap.d
name: cilium-bootstrap-file-dir
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.18.2@sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
seccompProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
- name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /home/kubernetes/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- hostPath:
path: /var/run/cilium/envoy/sockets
type: DirectoryOrCreate
name: envoy-sockets
- hostPath:
path: /tmp/cilium-bootstrap.d
type: DirectoryOrCreate
name: cilium-bootstrap-file-dir
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
updateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 2
type: RollingUpdate
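Before digging into the remaining manifests, it is worth confirming that this DaemonSet actually rolled out on every node:

kubectl -n kube-system rollout status daemonset/cilium
kubectl -n kube-system get pods -l k8s-app=cilium -o wide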
apiVersion: v1
kind: ConfigMap
metadata:
annotations:
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
name: cilium-config
namespace: kube-system
data:
agent-not-ready-taint-key: node.cilium.io/agent-not-ready
auto-direct-node-routes: "false"
bpf-distributed-lru: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
bpf-lb-acceleration: disabled
bpf-lb-algorithm-annotation: "false"
bpf-lb-external-clusterip: "false"
bpf-lb-map-max: "65536"
bpf-lb-mode-annotation: "false"
bpf-lb-sock: "false"
bpf-lb-source-range-all-types: "false"
bpf-map-dynamic-size-ratio: "0.0025"
bpf-policy-map-max: "16384"
bpf-policy-stats-map-max: "65536"
bpf-root: /sys/fs/bpf
cgroup-root: /run/cilium/cgroupv2
cilium-endpoint-gc-interval: 5m0s
cluster-id: "0"
cluster-name: gke
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
cni-exclusive: "true"
cni-log-file: /var/run/cilium/cilium-cni.log
custom-cni-conf: "false"
datapath-mode: veth
debug: "false"
debug-verbose: ""
default-lb-service-ipam: lbipam
direct-routing-skip-unreachable: "false"
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
egress-gateway-reconciliation-trigger-interval: 1s
enable-auto-protect-node-port-range: "true"
enable-bpf-clock-probe: "false"
enable-endpoint-health-checking: "true"
enable-endpoint-lockdown-on-policy-overflow: "false"
enable-endpoint-routes: "true"
enable-health-check-loadbalancer-ip: "true"
enable-health-check-nodeport: "true"
enable-health-checking: "true"
enable-hubble: "true"
enable-internal-traffic-policy: "true"
enable-ipv4: "true"
enable-ipv4-big-tcp: "false"
enable-ipv4-masquerade: "true"
enable-ipv6: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-k8s-networkpolicy: "true"
enable-l2-neigh-discovery: "false"
enable-l7-proxy: "true"
enable-lb-ipam: "true"
enable-masquerade-to-route-source: "false"
enable-metrics: "true"
enable-node-port: "false"
enable-node-selector-labels: "false"
enable-non-default-deny-policies: "true"
enable-policy: default
enable-policy-secrets-sync: "true"
enable-sctp: "false"
enable-source-ip-verification: "true"
enable-svc-source-range-check: "true"
enable-tcx: "true"
enable-vtep: "false"
enable-well-known-identities: "false"
enable-xt-socket-fallback: "true"
envoy-access-log-buffer-size: "4096"
envoy-base-id: "0"
envoy-keep-cap-netbindservice: "false"
external-envoy-proxy: "true"
health-check-icmp-failure-threshold: "3"
http-retry-count: "3"
hubble-disable-tls: "false"
hubble-listen-address: :4244
hubble-network-policy-correlation-enabled: "true"
hubble-socket-path: /var/run/cilium/hubble.sock
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
identity-allocation-mode: crd
identity-gc-interval: 15m0s
identity-heartbeat-timeout: 30m0s
identity-management-mode: agent
install-no-conntrack-iptables-rules: "false"
ipam: kubernetes
ipam-cilium-node-update-rate: 15s
iptables-random-fully: "false"
ipv4-native-routing-cidr: 10.44.0.0/14
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
kube-proxy-replacement: "false"
max-connected-clusters: "255"
mesh-auth-enabled: "true"
mesh-auth-gc-interval: 5m0s
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
metrics-sampling-interval: 5m
monitor-aggregation: medium
monitor-aggregation-flags: all
monitor-aggregation-interval: 5s
nat-map-stats-entries: "32"
nat-map-stats-interval: 30s
node-port-bind-protection: "true"
nodeport-addresses: ""
nodes-gc-interval: 5m0s
operator-api-serve-addr: 127.0.0.1:9234
operator-prometheus-serve-addr: :9963
policy-cidr-match-mode: ""
policy-default-local-cluster: "false"
policy-secrets-namespace: cilium-secrets
policy-secrets-only-from-secrets-namespace: "true"
preallocate-bpf-maps: "false"
procfs: /host/proc
proxy-connect-timeout: "2"
proxy-idle-timeout-seconds: "60"
proxy-initial-fetch-timeout: "30"
proxy-max-concurrent-retries: "128"
proxy-max-connection-duration-seconds: "0"
proxy-max-requests-per-connection: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-xff-num-trusted-hops-ingress: "0"
remove-cilium-node-taints: "true"
routing-mode: native
service-no-backend-response: reject
set-cilium-is-up-condition: "true"
set-cilium-node-taints: "true"
synchronize-k8s-nodes: "true"
tofqdns-dns-reject-response-code: refused
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "1000"
tofqdns-idle-connection-grace-period: 0s
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-preallocate-identities: "true"
tofqdns-proxy-response-max-delay: 100ms
tunnel-protocol: vxlan
tunnel-source-port-range: 0-0
unmanaged-pod-watcher-interval: "15"
vtep-cidr: ""
vtep-endpoint: ""
vtep-mac: ""
vtep-mask: ""
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
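Individual keys of cilium-config can be read back with a jsonpath query, e.g. to check the routing and IPAM modes shown above:

kubectl -n kube-system get configmap cilium-config \
    -o jsonpath='{.data.routing-mode}{"\n"}{.data.ipam}{"\n"}'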
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
name: cilium-operator
namespace: kube-system
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator-generic
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
image: quay.io/cilium/operator-generic:v1.18.2@sha256:cb4e4ffc5789fd5ff6a534e3b1460623df61cba00f5ea1c7b40153b5efb81805
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: cilium-operator
ports:
- containerPort: 9963
hostPort: 9963
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
terminationGracePeriodSeconds: 30
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.cilium.io/agent-not-ready
operator: Exists
volumes:
- configMap:
defaultMode: 420
name: cilium-config
name: cilium-config-path
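Since cilium-config sets remove-cilium-node-taints: "true", it is this operator that removes the node.cilium.io/agent-not-ready taint once the agent on a node becomes ready; if nodes stay tainted, the operator logs are the first place to look:

kubectl -n kube-system logs deployment/cilium-operator --tail=50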
apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "1"
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
k8s-app: cilium-envoy
name: cilium-envoy
name: cilium-envoy
namespace: kube-system
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: cilium-envoy
template:
metadata:
labels:
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
k8s-app: cilium-envoy
name: cilium-envoy
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --
- -c /var/run/cilium/envoy/bootstrap-config.json
- --base-id 0
- --log-level info
command:
- /usr/bin/cilium-envoy-starter
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: quay.io/cilium/cilium-envoy:v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324@sha256:7932d656b63f6f866b6732099d33355184322123cfe1182e6f05175a3bc2e0e0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-envoy
ports:
- containerPort: 9964
hostPort: 9964
name: envoy-metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
path: /healthz
port: 9878
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/cilium/envoy/sockets
name: envoy-sockets
- mountPath: /var/run/cilium/envoy/artifacts
name: envoy-artifacts
readOnly: true
- mountPath: /var/run/cilium/envoy/
name: envoy-config
readOnly: true
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium-envoy
serviceAccountName: cilium-envoy
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/run/cilium/envoy/sockets
type: DirectoryOrCreate
name: envoy-sockets
- hostPath:
path: /var/run/cilium/envoy/artifacts
type: DirectoryOrCreate
name: envoy-artifacts
- configMap:
defaultMode: 256
items:
- key: bootstrap-config.json
path: bootstrap-config.json
name: cilium-envoy-config
name: envoy-config
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
updateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 2
type: RollingUpdate
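The probes above target Envoy's health listener on 127.0.0.1:9878 (defined in the bootstrap config below), and metrics are exposed on host port 9964. Because the pod runs on the host network, both can be spot-checked from a node shell, assuming curl is available on the node:

curl -s http://127.0.0.1:9878/healthz
curl -s http://127.0.0.1:9964/metrics | head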
apiVersion: v1
kind: ConfigMap
metadata:
annotations:
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
name: cilium-envoy-config
namespace: kube-system
data:
bootstrap-config.json: |
{"admin":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}},"applicationLogConfig":{"logFormat":{"textFormat":"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"}},"bootstrapExtensions":[{"name":"envoy.bootstrap.internal_listener","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"}}],"dynamicResources":{"cdsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"},"ldsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"}},"node":{"cluster":"ingress-cluster","id":"host~127.0.0.1~no-id~localdomain"},"overloadManager":{"resourceMonitors":[{"name":"envoy.resource_monitors.global_downstream_max_connections","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig","max_active_downstream_connections":"50000"}}]},"staticResources":{"clusters":[{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfi
g":{}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"xds-grpc-cilium","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/xds.sock"}}}}]}]},"name":"xds-grpc-cilium","type":"STATIC","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","explicitHttpConfig":{"http2ProtocolOptions":{}}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"/envoy-admin","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}}}]}]},"name":"/envoy-admin","type":"STATIC"}],"listeners":[{"address":{"socketAddress":{"address":"0.0.0.0","portValue":9964}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtualHosts":[{"domains":["*"],"name":"prometheus_metrics_route","routes":[{"match":{"prefix":"/metrics"},"name":"prometheus_metrics_route","route":{"cluster":"/envoy-admin","prefixRewrite":"/stats/prometheus"}}]}]},"statPrefix":"envoy-prometheus-metrics-listener","streamIdleTimeout":"300s"}}]}],"name":"envoy-prometheus-metrics-listener"},{"address":{"socketAddress":{"address":"127.0.0.1","portValue":9878}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtual_hosts":[{"domains":["*"],"name":"health","routes":[{"match":{"prefix":"/healthz"},"name":"health","route":{"cluster":"/envoy-admin","prefixRewrite":"/ready"}}]}]},"statPrefix":"envoy-health-listener","streamIdleTimeout":"300s"}}]}],"name":"envoy-health-listener"}]}}
CNI Configuration
After Cilium is installed, the CNI configuration on each node becomes:
/etc/cni/net.d/05-cilium.conflist
{
"cniVersion": "0.3.1",
"name": "cilium",
"plugins": [
{
"type": "cilium-cni",
"enable-debug": false,
"log-file": "/var/run/cilium/cilium-cni.log"
}
]
}
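To inspect this file on a node without SSH access, a node debug pod works; kubectl debug mounts the node's root filesystem at /host (node name taken from the session below):

kubectl debug node/gke-gke-test-default-pool-e5de334e-9jms -it \
    --image=busybox -- cat /host/etc/cni/net.d/05-cilium.conflist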
cilium-dbg
The agent image ships the cilium-dbg debugging CLI. Exec into one of the agent pods (the pod name will differ per cluster) and query the agent's status:
$ kubectl -n kube-system exec -i -t cilium-69x5v -- bash
root@gke-gke-test-default-pool-e5de334e-9jms:/home/cilium# cilium-dbg status
KVStore: Disabled
Kubernetes: Ok 1.34 (v1.34.0-gke.1662000) [linux/amd64]
Kubernetes APIs: ["EndpointSliceOrEndpoint", "cilium/v2::CiliumCIDRGroup", "cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Pods", "networking.k8s.io/v1::NetworkPolicy"]
KubeProxyReplacement: False
Host firewall: Disabled
SRv6: Disabled
CNI Chaining: none
CNI Config file: successfully wrote CNI configuration file to /host/etc/cni/net.d/05-cilium.conflist
Cilium: Ok 1.18.2 (v1.18.2-5bd307a8)
NodeMonitor: Listening for events on 2 CPUs with 64x4096 of shared memory
Cilium health daemon: Ok
IPAM: IPv4: 11/254 allocated from 10.44.0.0/24,
IPv4 BIG TCP: Disabled
IPv6 BIG TCP: Disabled
BandwidthManager: Disabled
Routing: Network: Native Host: Legacy
Attach Mode: TCX
Device Mode: veth
Masquerading: IPTables [IPv4: Enabled, IPv6: Disabled]
Controller Status: 62/62 healthy
Proxy Status: OK, ip 10.44.0.205, 0 redirects active on ports 10000-20000, Envoy: external
Global Identity Range: min 256, max 65535
Hubble: Ok Current/Max Flows: 4095/4095 (100.00%), Flows/s: 14.39 Metrics: Disabled
Encryption: Disabled
Cluster health: 3/3 reachable (2025-09-23T07:47:31Z) (Probe interval: 1m56.754608943s)
Name IP Node Endpoints
Modules Health: Stopped(13) Degraded(0) OK(88)
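Beyond status, a couple of other cilium-dbg subcommands are useful for the same kind of spot check, run from inside the agent pod as above:

cilium-dbg endpoint list
cilium-dbg node list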