
Setting Up a Kubernetes Cluster on Ubuntu


Kubernetes Version: 1.22.6

System: Ubuntu Server 21.10


VirtualBox

The test cluster is created with VirtualBox; host system used for testing: Kubuntu 21.10

VM system image: https://repo.huaweicloud.com/ubuntu-releases/21.10/ubuntu-21.10-live-server-amd64.iso

VM hardware: CPU >= 2 cores; RAM >= 4 GB; disk >= 32 GB

VM hostnames: node01, node02, node03
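
For repeatable setups, the VMs can also be created from the command line. A minimal VBoxManage sketch for one node, with illustrative names and paths (disk file, ISO location) rather than anything from the original setup:

 # sketch: create one node via VBoxManage; repeat for node02/node03
 VBoxManage createvm --name node01 --ostype Ubuntu_64 --register
 VBoxManage modifyvm node01 --cpus 2 --memory 4096
 VBoxManage createmedium disk --filename node01.vdi --size 32768
 VBoxManage storagectl node01 --name SATA --add sata --controller IntelAhci
 VBoxManage storageattach node01 --storagectl SATA --port 0 --device 0 --type hdd --medium node01.vdi
 VBoxManage storageattach node01 --storagectl SATA --port 1 --device 0 --type dvddrive --medium ubuntu-21.10-live-server-amd64.iso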

VM network adapters: a NAT network is the recommended way to connect the nodes

  • Host-Only:

    Tools - Network - create an adapter: Host: 10.10.10.2; DHCP: 10.10.10.1/24

     # on a Linux host, enable the following kernel modules and parameters
     cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
     overlay
     br_netfilter
     EOF
     sudo modprobe overlay
     sudo modprobe br_netfilter
     
     # https://imroc.cc/k8s/faq/why-enable-bridge-nf-call-iptables/
     cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
     net.ipv4.ip_forward                 = 1
     net.bridge.bridge-nf-call-iptables  = 1
     net.bridge.bridge-nf-call-ip6tables = 1
     EOF
     # apply the sysctl params without reboot
     sudo sysctl --system
     
     # masquerade the Host-Only subnet 10.10.10.0/24 so the VMs can reach the Internet
     sudo iptables -t nat -I POSTROUTING -s 10.10.10.0/24 -j MASQUERADE
     sudo iptables -P FORWARD ACCEPT
     sudo iptables -t nat -P POSTROUTING ACCEPT
  • NAT Network:

    File - Preferences - Network - create a NAT network: CIDR: 10.10.10.0/24; DHCP enabled; forward each VM's port 22 to a host port (see the CLI sketch below)
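
    The NAT network and the port 22 forwarding rules can also be created from the command line. A sketch, assuming a network named k8snet and host ports 2211-2213 (both arbitrary choices, not from the original setup):

     # create the NAT network (same as File - Preferences - Network in the GUI)
     VBoxManage natnetwork add --netname k8snet --network "10.10.10.0/24" --enable --dhcp on
     # attach each VM's first NIC to it, e.g. for node01:
     VBoxManage modifyvm node01 --nic1 natnetwork --nat-network1 k8snet
     # forward each VM's port 22 to a host port: host 2211 -> node01:22, etc.
     VBoxManage natnetwork modify --netname k8snet --port-forward-4 "ssh-node01:tcp:[]:2211:[10.10.10.11]:22"
     VBoxManage natnetwork modify --netname k8snet --port-forward-4 "ssh-node02:tcp:[]:2212:[10.10.10.12]:22"
     VBoxManage natnetwork modify --netname k8snet --port-forward-4 "ssh-node03:tcp:[]:2213:[10.10.10.13]:22"
     # then: ssh -p 2211 <user>@127.0.0.1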

Appendix: comparison of VirtualBox network modes

 Network      VM<->Host  VM1<->VM2  VM->Internet  VM<-Internet
 Host-Only    +          +          -             -
 Internal     -          +          -             -
 Bridged      +          +          +             +
 NAT          -          -          +             Port Forward
 NAT Network  -          +          +             Port Forward


VM network configuration:

 # edit /etc/netplan/00-installer-config.yaml to give each node a static IP
 # node01 10.10.10.11
 # node02 10.10.10.12
 # node03 10.10.10.13
 network:
   version: 2
   ethernets:
     enp0s3:
       dhcp4: false
       addresses:
       - 10.10.10.11/24
       gateway4: 10.10.10.2 # host IP
       nameservers:
         addresses:
         - 223.5.5.5
         - 223.6.6.6
 
 # apply the static IP configuration
 sudo netplan --debug apply
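
After applying, it may be worth confirming on each node that the address, default route, and upstream connectivity actually took effect:

 # verify the static address and the default route
 ip -4 addr show enp0s3
 ip route show default
 # reachability: gateway and a public DNS server
 ping -c 2 10.10.10.2
 ping -c 2 223.5.5.5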


Base Environment

One-Shot Install Script

  • Tune kernel parameters

  • Disable swap

  • Sync the clock

  • Install containerd as the CRI runtime

  • Install the kubeadm, kubelet, and kubectl tools

 #!/bin/bash
 set -eux -o pipefail
 
 # https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
 # https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd
 # https://docs.docker.com/engine/install/#server
 
 if [ "$(id -u)" -ne 0 ]; then
   echo "THIS SCRIPT MUST BE RUN AS ROOT"
   exit 1
 fi
 
 # configure prerequisites
 config_system() {
   # update packages
   apt update && apt dist-upgrade -y && apt autopurge -y
   apt install -y curl gnupg2 apt-transport-https ca-certificates bash-completion tzdata ntpdate
 
   # turn off swap
   swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
 
   # hostnamectl set-hostname node01
   # echo '10.10.10.11 api.k8s.local' >>/etc/hosts
   # set timezone
   dpkg-reconfigure tzdata
   # set ntp
   ntpdate cn.pool.ntp.org
   # sync hardware clock
   hwclock --systohc
 
   # letting iptables see bridged traffic
   cat <<EOF | tee /etc/modules-load.d/containerd.conf
 overlay
 br_netfilter
 EOF
   modprobe overlay
   modprobe br_netfilter
 
   # setup required sysctl params, these persist across reboots.
   cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
 net.ipv4.ip_forward                 = 1
 net.bridge.bridge-nf-call-iptables  = 1
 net.bridge.bridge-nf-call-ip6tables = 1
 EOF
 
   # apply sysctl params without reboot
   sysctl --system
 }
 
 install_containerd() {
   # add docker's official GPG key
   # curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --yes --dearmor -o /etc/apt/trusted.gpg.d/docker.gpg
   curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian/gpg | gpg --yes --dearmor -o /etc/apt/trusted.gpg.d/docker.gpg
   # curl -fsSL https://repo.huaweicloud.com/docker-ce/linux/debian/gpg | gpg --yes --dearmor -o /etc/apt/trusted.gpg.d/docker.gpg
 
   # set up the stable repository
   osarch=$(dpkg --print-architecture)
   osname=$(lsb_release -s -i | tr '[:upper:]' '[:lower:]')
   codename=$(lsb_release -s -c)
   # echo "deb [arch=$osarch] https://download.docker.com/linux/$osname $codename stable" >/etc/apt/sources.list.d/docker.list
   echo "deb [arch=$osarch] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/$osname $codename stable" >/etc/apt/sources.list.d/docker.list
   # echo "deb [arch=$osarch] https://repo.huaweicloud.com/docker-ce/linux/$osname $codename stable" >/etc/apt/sources.list.d/docker.list
 
   # uninstall old versions & install containerd
   apt purge -y docker docker.io containerd runc || true
   apt update && apt install -y containerd.io
 
   # configure containerd
   mkdir -p /etc/containerd && containerd config default >/etc/containerd/config.toml
 
   sed -i "s#k8s.gcr.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
   sed -i "s#pause:3.2#pause:3.5#g" /etc/containerd/config.toml
   sed -i "s#https://registry-1.docker.io#https://registry.cn-shanghai.aliyuncs.com#g" /etc/containerd/config.toml
   sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
 
   # restart containerd
   systemctl daemon-reload && systemctl enable containerd && systemctl restart containerd
 }
 
 install_k8s() {
   # add google cloud's official GPG key
   # curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --yes --dearmor -o /etc/apt/trusted.gpg.d/kubernetes.gpg
   curl -fsSL https://repo.huaweicloud.com/kubernetes/apt/doc/apt-key.gpg | gpg --yes --dearmor -o /etc/apt/trusted.gpg.d/kubernetes.gpg
 
   # set up the kubernetes repository
   # echo "https://apt.kubernetes.io/ kubernetes-xenial main" >/etc/apt/sources.list.d/kubernetes.list
   # echo "deb https://repo.huaweicloud.com/kubernetes/apt/ kubernetes-xenial main" >/etc/apt/sources.list.d/kubernetes.list
   echo "deb https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt/ kubernetes-xenial main" >/etc/apt/sources.list.d/kubernetes.list
 
   # https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration/#the-kubelet-drop-in-file-for-systemd
   #echo 'KUBELET_EXTRA_ARGS=--cgroup-driver=systemd' >/etc/default/kubelet
 
   # install kubeadm/kubelet/kubectl
   export DEB_VERSION=1.22.6-00
   apt update && apt install -y kubeadm=$DEB_VERSION kubectl=$DEB_VERSION kubelet=$DEB_VERSION
   # pin the version
   apt-mark hold kubeadm kubelet kubectl
 
   cat <<EOF | tee /etc/crictl.yaml
 runtime-endpoint: unix:///run/containerd/containerd.sock
 image-endpoint: unix:///run/containerd/containerd.sock
 timeout: 10
 debug: false
 EOF
 
   # restart kubelet
   systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet
 
   kubectl completion bash >/etc/bash_completion.d/kubectl
 }
 
 config_system
 install_containerd
 install_k8s
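
A quick sanity check after the script finishes (not part of the original script) confirms the runtime is up and the tools landed at the pinned version:

 # verify containerd and the kubeadm/kubelet/kubectl installs
 systemctl is-active containerd
 sudo crictl info | head
 kubeadm version -o short
 kubelet --version
 kubectl version --client --short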

Initialize the Cluster

Cluster initialization manifest: kubeadm-init.yaml

 # kubeadm config print init-defaults
 apiVersion: kubeadm.k8s.io/v1beta3
 bootstrapTokens:
 - groups:
   - system:bootstrappers:kubeadm:default-node-token
   # token: abcdef.0123456789abcdef
   ttl: 24h0m0s
   usages:
   - signing
   - authentication
 kind: InitConfiguration
 localAPIEndpoint:
   advertiseAddress: 10.10.10.11 # api server listen address
   bindPort: 6443
 nodeRegistration:
   criSocket: /run/containerd/containerd.sock # containerd socket
   imagePullPolicy: IfNotPresent
   name: node01 # node name
   taints: null
 ---
 apiServer:
   certSANs: # cert SANs
   - 127.0.0.1
   - 10.10.10.11
   timeoutForControlPlane: 4m0s
 apiVersion: kubeadm.k8s.io/v1beta3
 certificatesDir: /etc/kubernetes/pki
 clusterName: kubernetes
 #controlPlaneEndpoint: 10.10.10.11 # control plane endpoint
 controllerManager: {}
 dns: {}
 etcd:
   local:
     dataDir: /var/lib/etcd
 imageRepository: registry.aliyuncs.com/google_containers # image registry
 kind: ClusterConfiguration
 kubernetesVersion: 1.22.6 # match the installed k8s version
 networking:
   dnsDomain: cluster.local
   serviceSubnet: 10.96.0.0/12 # service subnet, for CNI plugin
   podSubnet: 10.244.0.0/16 # pod subnet
 scheduler: {}
 ---
 # https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/#configuring-the-kubelet-cgroup-driver
 kind: KubeletConfiguration
 apiVersion: kubelet.config.k8s.io/v1beta1
 cgroupDriver: systemd

Initialize a control plane from the manifest

 # kubeadm needs root to access /run/containerd/containerd.sock
 # pull the images required by K8s
 sudo kubeadm -v=5 config images pull --config kubeadm-init.yaml
 # (optional) import previously exported images
 sudo ctr -n k8s.io i import k8s.tar
 # initialize a control plane from the manifest
 sudo kubeadm -v=5 init --config kubeadm-init.yaml | tee kubeadm-init.log
 # kubectl config for a regular user
 mkdir -p $HOME/.kube
 sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 sudo chown $(id -u):$(id -g) $HOME/.kube/config
 # kubectl config for the root user
 export KUBECONFIG=/etc/kubernetes/admin.conf
 # dump cluster info
 kubectl cluster-info dump
 
 # commands that may help while debugging:
 # list the images the manifest requires
 sudo kubeadm --config kubeadm-init.yaml config images list
 # validate the manifest and the environment
 sudo kubeadm init -v=5 --config kubeadm-init.yaml --dry-run
 # inspect error logs
 sudo crictl ps -a
 sudo crictl logs <container-id>
 systemctl status kubelet
 journalctl -xeu kubelet
 kubectl describe pods -n kube-system <pod>
 # reset the node if initialization fails
 sudo kubeadm reset -f
 sudo rm -rf /etc/kubernetes/ ~/.kube/


Install a CNI Plugin

The CoreDNS Pods will not start until a Container Network Interface (CNI) plugin is installed, so until then the node reports NotReady.

 # install the Calico plugin
 # https://projectcalico.docs.tigera.io/getting-started/kubernetes/self-managed-onprem/onpremises
 # download the Calico manifest
 curl -fsSLO https://docs.projectcalico.org/manifests/calico.yaml
 # adjust the manifest to our pod subnet
 POD_CIDR="10.244.0.0/16" && sed -i -e "s#192.168.0.0/16#$POD_CIDR#g" calico.yaml
 # apply the manifest
 kubectl apply -f calico.yaml
 # check all Pod statuses
 kubectl get pods -A -o wide
 # check all Node statuses
 kubectl get nodes -o wide
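
To block until Calico and CoreDNS are up and the nodes have left NotReady, something like the following should work (label selectors taken from the Calico and CoreDNS manifests):

 # wait for the CNI pods, CoreDNS, and the nodes to become Ready
 kubectl -n kube-system wait --for=condition=Ready pods -l k8s-app=calico-node --timeout=300s
 kubectl -n kube-system wait --for=condition=Ready pods -l k8s-app=kube-dns --timeout=300s
 kubectl wait --for=condition=Ready nodes --all --timeout=300s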


Join Worker Nodes

 # https://kubernetes.io/zh/docs/reference/setup-tools/kubeadm/kubeadm-join/
 # generate a fresh token on the control-plane node
 kubeadm token create --print-join-command
 
 # run the printed command as root on each worker node to join the cluster
 # sudo kubeadm join 10.10.10.11:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash \
 #  sha256:a9b6a3acd9f6ce958bc2423c6882fdc2a6a192b347cf1dd18fe8a97083c7fe28
 
 # check that all Nodes reach READY
 kubectl get nodes -o wide
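
Freshly joined workers show ROLES as <none>; labeling them is optional but makes the node list easier to read:

 # optional: tag the workers so ROLES is no longer <none>
 kubectl label node node02 node-role.kubernetes.io/worker=
 kubectl label node node03 node-role.kubernetes.io/worker=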


Metrics Server

 # download the Metrics Server manifest
 METRICS_VERSION=0.5.2 && curl -fsSL -o metrics-server.yaml https://github.com/kubernetes-sigs/metrics-server/releases/download/v$METRICS_VERSION/components.yaml
 # adjust the manifest: mirror the image and skip kubelet TLS verification
 sed -i 's#k8s.gcr.io/metrics-server#registry.aliyuncs.com/google_containers#' metrics-server.yaml
 sed -i '/args:/a\ \ \ \ \ \ \ \ - --kubelet-insecure-tls' metrics-server.yaml
 # apply the manifest
 kubectl apply -f metrics-server.yaml
 # verify it works
 kubectl top nodes
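
If kubectl top errors out, checking the aggregated API registration is a useful first step; once it reports Available, per-pod metrics work as well:

 # the metrics API must be registered and Available
 kubectl get apiservices v1beta1.metrics.k8s.io
 # per-pod usage, sorted by CPU
 kubectl top pods -A --sort-by=cpu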


Istio

 # download istioctl
 ISTIO_VERSION=1.12.2 && curl -fsSLO https://github.com/istio/istio/releases/download/$ISTIO_VERSION/istio-$ISTIO_VERSION-linux-amd64.tar.gz
 tar zxvf istio-$ISTIO_VERSION-linux-amd64.tar.gz && cd istio-$ISTIO_VERSION
 # customizing a production setup from the default profile is recommended
 # https://istio.io/latest/zh/docs/setup/additional-setup/config-profiles/
 istioctl profile dump default > istio-custom.yaml

Customized Istio manifest: istio-custom.yaml

 # options: https://istio.io/latest/zh/docs/reference/config/istio.operator.v1alpha1/
 # spec.values options: https://istio.io/latest/zh/docs/reference/config/installation-options/
 apiVersion: install.istio.io/v1alpha1
 kind: IstioOperator
 spec:
   components:
     base:
       enabled: true
     cni:
       enabled: false
     egressGateways:
     - enabled: false
       name: istio-egressgateway
     ingressGateways:
     - enabled: true
       name: istio-ingressgateway
       k8s:
         env:
         - name: aliyun_logs_envoy
           value: stdout
         - name: aliyun_logs_envoy_tags
           value: "fields.topic=log:envoy"
         hpaSpec:
           minReplicas: 3
         affinity:
           podAntiAffinity:
             preferredDuringSchedulingIgnoredDuringExecution:
             - weight: 100
               podAffinityTerm:
                 labelSelector:
                   matchExpressions:
                   - key: app
                     operator: In
                     values:
                     - istio-ingressgateway
                 topologyKey: kubernetes.io/hostname
         service:
           ports:
           - name: http2
             port: 80
             nodePort: 31380
           - name: https
             port: 443
             nodePort: 31390
     istiodRemote:
       enabled: false
     pilot:
       enabled: true
   hub: docker.io/istio
   meshConfig:
     defaultConfig:
       proxyMetadata: {}
     enablePrometheusMerge: true
     accessLogFile: /dev/stdout
   profile: default
   tag: 1.12.2
   values:
     base:
       enableCRDTemplates: false
       validationURL: ""
     defaultRevision: ""
     gateways:
       istio-egressgateway:
         autoscaleEnabled: true
         env: {}
         name: istio-egressgateway
         secretVolumes:
         - mountPath: /etc/istio/egressgateway-certs
           name: egressgateway-certs
           secretName: istio-egressgateway-certs
         - mountPath: /etc/istio/egressgateway-ca-certs
           name: egressgateway-ca-certs
           secretName: istio-egressgateway-ca-certs
         type: ClusterIP
       istio-ingressgateway:
         autoscaleEnabled: true
         env: {}
         name: istio-ingressgateway
         secretVolumes:
         - mountPath: /etc/istio/ingressgateway-certs
           name: ingressgateway-certs
           secretName: istio-ingressgateway-certs
         - mountPath: /etc/istio/ingressgateway-ca-certs
           name: ingressgateway-ca-certs
           secretName: istio-ingressgateway-ca-certs
         type: NodePort
         runAsRoot: true
     global:
       configValidation: true
       defaultNodeSelector: {}
       defaultPodDisruptionBudget:
         enabled: true
       defaultResources:
         requests:
           cpu: 10m
       imagePullPolicy: ""
       imagePullSecrets: []
       istioNamespace: istio-system
       istiod:
         enableAnalysis: false
       jwtPolicy: third-party-jwt
       logAsJson: false
       logging:
         level: default:info
       meshNetworks: {}
       mountMtlsCerts: false
       multiCluster:
         clusterName: ""
         enabled: false
       network: ""
       omitSidecarInjectorConfigMap: false
       oneNamespace: false
       operatorManageWebhooks: false
       pilotCertProvider: istiod
       priorityClassName: ""
       proxy:
         autoInject: enabled
         clusterDomain: cluster.local
         componentLogLevel: misc:error
         enableCoreDump: false
         excludeIPRanges: ""
         excludeInboundPorts: ""
         excludeOutboundPorts: ""
         image: proxyv2
         includeIPRanges: '10.244.0.0/16,10.96.0.0/12'
         logLevel: warning
         privileged: false
         readinessFailureThreshold: 30
         readinessInitialDelaySeconds: 1
         readinessPeriodSeconds: 2
         resources:
           limits:
             cpu: 2000m
             memory: 1024Mi
           requests:
             cpu: 100m
             memory: 128Mi
         statusPort: 15020
         tracer: zipkin
       proxy_init:
         image: proxyv2
         resources:
           limits:
             cpu: 2000m
             memory: 1024Mi
           requests:
             cpu: 10m
             memory: 10Mi
       sds:
         token:
           aud: istio-ca
       sts:
         servicePort: 0
       tracer:
         datadog: {}
         lightstep: {}
         stackdriver: {}
         zipkin: {}
       useMCP: false
     istiodRemote:
       injectionURL: ""
     pilot:
       autoscaleEnabled: true
       autoscaleMax: 5
       autoscaleMin: 1
       configMap: true
       cpu:
         targetAverageUtilization: 80
       enableProtocolSniffingForInbound: true
       enableProtocolSniffingForOutbound: true
       env: {}
       image: pilot
       keepaliveMaxServerConnectionAge: 30m
       nodeSelector: {}
       podLabels: {}
       replicaCount: 1
       traceSampling: 1
     telemetry:
       enabled: true
       v2:
         enabled: true
         metadataExchange:
           wasmEnabled: false
         prometheus:
           enabled: true
           wasmEnabled: false
         stackdriver:
           configOverride: {}
           enabled: false
           logging: false
           monitoring: false
           topology: false

Install Istio

 # installing with istioctl install -f is recommended
 # https://istio.io/latest/zh/docs/setup/install/istioctl/
 istioctl install -f istio-custom.yaml
 # fetch the IstioOperator manifest that records the installed state
 kubectl -n istio-system get IstioOperator installed-state -o yaml > installed-state.yaml
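
After installation it is worth confirming the control plane and ingress gateway pods are running, then enabling sidecar injection for the namespaces that need it (the default namespace here is only an example):

 # verify the installation against the manifest
 istioctl verify-install -f istio-custom.yaml
 kubectl -n istio-system get pods -o wide
 # enable automatic sidecar injection for a namespace
 kubectl label namespace default istio-injection=enabled --overwrite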




