1. Installing kubeadm=1.22.1-00 kubelet=1.22.1-00 kubectl=1.22.1-00

Based on the components and versions installed on the k8s host in the internet zone, deploy them offline into the intranet zone:

On a clean (freshly installed) host, run:

List all required packages:

apt-get install --dry-run -qq kubeadm=1.22.1-00 kubelet=1.22.1-00 kubectl=1.22.1-00 | grep "Inst" | awk -F'[ (]+' '{print $2}'

conntrack
cri-tools
ebtables
kubernetes-cni
socat
kubelet
kubectl
kubeadm
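
If the "clean" host is not perfectly clean, the dry run can omit dependencies that happen to be installed already. A recursive dependency query is a more exhaustive sketch (package names only; the exact versions are resolved in the next step):

apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances kubeadm kubelet kubectl | grep "^\w" | sort -u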

On a host that already has the kube packages installed, run:

Check the package versions:

dpkg -l | grep -E 'conntrack|cri-tools|ebtables|kubernetes-cni|socat|kubelet|kubectl|kubeadm' | awk '{print $2 "=" $3}'

conntrack=1:1.4.5-2
cri-tools=1.25.0-00
ebtables=2.0.11-3build1
kubeadm=1.22.1-00
kubectl=1.22.1-00
kubelet=1.22.1-00
kubernetes-cni=1.1.1-00
libnetfilter-conntrack3:amd64=1.0.7-2
socat=1.7.3.3-2

Download the packages

Manual method:

dpkg -l | grep -E 'conntrack|cri-tools|ebtables|kubernetes-cni|socat|kubelet|kubectl|kubeadm' | awk '{print "apt-get download -qq " $2 "=" $3}' > kube.sh

root@master-3:/data/pkg# cat kube.sh   # review the generated commands, then run: bash kube.sh

apt-get download -qq conntrack=1:1.4.5-2
apt-get download -qq cri-tools=1.25.0-00
apt-get download -qq ebtables=2.0.11-3build1
apt-get download -qq kubeadm=1.22.1-00
apt-get download -qq kubectl=1.22.1-00
apt-get download -qq kubelet=1.22.1-00
apt-get download -qq kubernetes-cni=1.1.1-00
apt-get download -qq libnetfilter-conntrack3:amd64=1.0.7-2
apt-get download -qq socat=1.7.3.3-2

Script method:

vim pkgs.txt

conntrack
cri-tools
ebtables
kubernetes-cni
socat
kubelet
kubectl
kubeadm

vim download_pkgs.sh

#!/bin/bash
# Path to the package list file
components_file="$(pwd)/pkgs.txt"
# Make sure the list file exists
if [ ! -f "$components_file" ]; then
    echo "Error: Components file not found."
    exit 1
fi

# Temporary directory for the downloaded packages
temp_dir="temp_dir"
mkdir -p "$temp_dir"
cd "$temp_dir" || exit 1

# Read each package from the list and download the locally installed version
while read -r component; do
    version=$(dpkg -l | grep -E "^ii\s+$component\s" | awk '{print $3}')
    echo "Checking component: $component"
    echo "Checking version: $version"
    if [ -n "$version" ]; then
        echo "Downloading $component=$version..."
        apt-get download -qq --allow-unauthenticated "$component=$version"
    else
        echo "Component $component not found."
    fi
done < "$components_file"

cd - > /dev/null

# Pack the downloaded packages
tar -czvf downloaded_components.tar.gz -C "$temp_dir" .

# Remove the temporary directory
rm -rf "$temp_dir"

echo "Downloaded components are packed and saved as downloaded_components.tar.gz"
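
Run it on the internet-connected host and collect the archive:

bash download_pkgs.sh
ls -lh downloaded_components.tar.gz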

Copy all the downloaded packages to the intranet server and install them offline:

dpkg -i *.deb
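
Optionally verify the installed versions and pin the kube packages so a later apt run cannot upgrade them behind kubeadm's back:

dpkg -l | grep -E 'kubeadm|kubelet|kubectl|kubernetes-cni|cri-tools'
apt-mark hold kubelet kubeadm kubectl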

2. Preparing and installing the images required by Kubernetes

[root@master1 ~]# kubeadm config images list --kubernetes-version=v1.22.1

k8s.gcr.io/kube-apiserver:v1.22.1
k8s.gcr.io/kube-controller-manager:v1.22.1
k8s.gcr.io/kube-scheduler:v1.22.1
k8s.gcr.io/kube-proxy:v1.22.1
k8s.gcr.io/pause:3.5
k8s.gcr.io/etcd:3.5.0-0
k8s.gcr.io/coredns/coredns:v1.8.4
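
On the internet-zone host, this list can drive a pull loop against the Aliyun mirror; a sketch assuming every image exists under registry.cn-hangzhou.aliyuncs.com/google_containers with the same name and tag (which matches the listing below):

for img in $(kubeadm config images list --kubernetes-version=v1.22.1 2>/dev/null); do
  docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${img##*/}"
done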

Export the images from the server in the internet zone, then import them on the intranet server.

root@master-1:/data/kube_images# docker images
REPOSITORY                                                                    TAG       IMAGE ID       CREATED         SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.22.1   f30469a2491a   2 years ago     128MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.22.1   aca5ededae9c   2 years ago     52.7MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.22.1   36c4ebbc9d97   2 years ago     104MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.22.1   6e002eb89a88   2 years ago     122MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.5.0-0   004811815584   2 years ago     295MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   v1.8.4    8d147537fb7d   2 years ago     47.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.5       ed210e3e4a5b   2 years ago     683kB

Export the images (by image ID):

docker save -o kube-apiserver.tar            f30469a2491a
docker save -o kube-scheduler.tar            aca5ededae9c
docker save -o kube-proxy.tar                36c4ebbc9d97
docker save -o kube-controller-manager.tar   6e002eb89a88
docker save -o etcd.tar                      004811815584
docker save -o coredns.tar                   8d147537fb7d
docker save -o pause.tar                     ed210e3e4a5b
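
Saving by image ID, as above, drops the repository and tag, which is why a re-tagging step is needed after loading. An alternative sketch saves each image by repository:tag so the tags survive the round trip and the tagging step below can be skipped:

docker images --format '{{.Repository}}:{{.Tag}}' | grep google_containers | while read -r ref; do
    docker save -o "$(basename "${ref%:*}").tar" "$ref"
done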

Pack the images into a single archive:

tar cf kube_images.tar -C kube_images/ .

Transfer kube_images.tar to the intranet server, unpack it, and load the images.

Import a single image:

docker load -i kube-apiserver.tar

Batch import:

mkdir kube_images
tar xf kube_images.tar -C kube_images
cd kube_images
for i in ./*.tar; do docker load -i "$i"; done
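
Confirm that all seven images are present:

docker images | grep google_containers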

Tag the images (docker save by image ID strips the repository and tag, so they must be re-applied after loading):

vim im_tag.txt

registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.22.1   f30469a2491a   2 years ago     128MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.22.1   aca5ededae9c   2 years ago     52.7MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.22.1   36c4ebbc9d97   2 years ago     104MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.22.1   6e002eb89a88   2 years ago     122MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.5.0-0   004811815584   2 years ago     295MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   v1.8.4    8d147537fb7d   2 years ago     47.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.5       ed210e3e4a5b   2 years ago     683kB

vim docker_tag.sh


#!/bin/bash
# Read the image info lines from im_tag.txt into the array "images"
mapfile -t images < im_tag.txt
# Iterate over every element of the array
for image_info in "${images[@]}"; do
  # Skip blank lines
  [ -z "${image_info}" ] && continue
  # Split each element with awk: column 1 is the target repository,
  # column 2 the tag, column 3 the source image ID
  target_image_name=$(echo "${image_info}" | awk '{print $1}')
  target_image_version=$(echo "${image_info}" | awk '{print $2}')
  target_image_tag=${target_image_name}:${target_image_version}
  source_image_id=$(echo "${image_info}" | awk '{print $3}')
  # Apply the target tag to the source image ID
  docker tag "${source_image_id}" "${target_image_tag}"
  echo "Tagged image ${source_image_id} as ${target_image_tag}"
done
echo "Tagging complete"

Script notes:

        mapfile reads lines from im_tag.txt and stores them in an array named images; the -t option strips the trailing newline from each line.

        In ${images[@]}, the @ is the array-expansion syntax: it expands the array into its individual elements, each as a separate word. For example, if images=("image1" "image2" "image3"), then "${images[@]}" expands to "image1" "image2" "image3". This is what lets the for loop above visit the elements one at a time, and it is the usual way to iterate over an array or pass its elements to a command.
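
A quick self-contained demonstration of both constructs:

printf 'a\nb\nc\n' > demo.txt
mapfile -t arr < demo.txt
echo "count=${#arr[@]}"                          # prints: count=3
for e in "${arr[@]}"; do echo "item: $e"; done   # one line per element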

3. Deploying the kube cluster

Run the kubeadm init command on the first master node. The control-plane endpoint k8s.zspt.com must resolve on every node first; see the sketch below.
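
A minimal name-resolution sketch, assuming a hypothetical VIP or first-master address of 192.168.1.10 (substitute your own):

echo "192.168.1.10 k8s.zspt.com" >> /etc/hosts   # on every node; placeholder address

With that in place, run the init: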

kubeadm init --control-plane-endpoint k8s.zspt.com --kubernetes-version=v1.22.1 --pod-network-cidr 172.17.0.0/16 --service-cidr 10.10.0.0/16 --service-dns-domain zspt.com --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --token-ttl=0

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join k8s.zspt.com:6443 --token wesgr0.h5vieh75mn87zqzk \
        --discovery-token-ca-cert-hash sha256:aa1b7329732dc2f5bf890e6e7fe186a6d71a5dd1a58054902320c97234f4e6df \
        --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join k8s.zspt.com:6443 --token wesgr0.h5vieh75mn87zqzk \
        --discovery-token-ca-cert-hash sha256:aa1b7329732dc2f5bf890e6e7fe186a6d71a5dd1a58054902320c97234f4e6df

Joining the other master nodes to the cluster:

1. On the first master node, run:

root@k8s-master-1:/opt# kubeadm init phase upload-certs --upload-certs
W1206 18:24:24.000876  750987 version.go:103] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
W1206 18:24:24.001012  750987 version.go:104] falling back to the local client version: v1.22.1
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f2979ead43a09c4363ba7bc8c1e24eec1d007d682763db0eedda5a223e10af92

2. On the other master nodes, run the join command:

Combine the kubeadm join command printed by kubeadm init with the certificate key produced above:

  kubeadm join k8s.zspt.com:6443 --token wesgr0.h5vieh75mn87zqzk \
        --discovery-token-ca-cert-hash sha256:aa1b7329732dc2f5bf890e6e7fe186a6d71a5dd1a58054902320c97234f4e6df \
        --control-plane --certificate-key f2979ead43a09c4363ba7bc8c1e24eec1d007d682763db0eedda5a223e10af92

Output:

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

3. Check the result:

root@k8s-master-1:/opt# kubectl get node
NAME                    STATUS     ROLES                  AGE     VERSION
k8s-master-1.zspt.com   NotReady   control-plane,master   26m     v1.22.1
k8s-master-2.zspt.com   NotReady   control-plane,master   3m37s   v1.22.1
k8s-master-3.zspt.com   NotReady   control-plane,master   25s     v1.22.1

Installing the Cilium network plugin (the nodes stay NotReady until a CNI plugin is installed)

On one of the master nodes, run:

curl -LO https://github.com/cilium/cilium-cli/releases/download/v0.15.12/cilium-linux-amd64.tar.gz
tar xf cilium-linux-amd64.tar.gz
mv cilium /usr/local/bin/cilium

# Default tunnel mode with the VXLAN protocol, but with a custom IPAM mode and PodCIDR

cilium install \
  --set kubeProxyReplacement=strict \
  --set ipam.mode=kubernetes \
  --set ipam.operator.clusterPoolIPv4PodCIDRList=172.17.0.0/16 \
  --set ipam.operator.clusterPoolIPv4MaskSize=24
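
Whichever variant you choose, wait for the agent and operator to become ready before continuing:

cilium status --wait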

# Functionally the same as above, but with the routing mode and protocol stated explicitly; swap vxlan for geneve to change the tunnel protocol

cilium install \
--set kubeProxyReplacement=strict \
--set ipam.mode=kubernetes \
--set routingMode=tunnel \
--set tunnelProtocol=vxlan \
--set ipam.operator.clusterPoolIPv4PodCIDRList=172.17.0.0/16 \
--set ipam.operator.clusterPoolIPv4MaskSize=24

# Native routing mode

cilium install \
--set kubeProxyReplacement=strict \
--set ipam.mode=kubernetes \
--set routingMode=native \
--set ipam.operator.clusterPoolIPv4PodCIDRList=172.17.0.0/16 \
--set ipam.operator.clusterPoolIPv4MaskSize=24 \
--set ipv4NativeRoutingCIDR=172.17.0.0/16 \
--set autoDirectNodeRoutes=true

Install Cilium together with the Hubble and Prometheus add-ons:
cilium install \
  --set kubeProxyReplacement=strict \
  --set ipam.mode=kubernetes \
  --set routingMode=native \
  --set ipam.operator.clusterPoolIPv4PodCIDRList=172.17.0.0/16 \
  --set ipam.operator.clusterPoolIPv4MaskSize=24 \
  --set ipv4NativeRoutingCIDR=172.17.0.0/16 \
  --set autoDirectNodeRoutes=true \
  --set prometheus.enabled=true \
  --set operator.prometheus.enabled=true \
  --set hubble.enabled="true" \
  --set hubble.relay.enabled="true" \
  --set hubble.ui.enabled="true" \
  --set hubble.metrics.enableOpenMetrics=true \
  --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,httpV2:exemplars=true;labelsContext=source_ip\,source_namespace\,source_workload\,destination_ip\,destination_namespace\,destination_workload\,traffic_direction}"
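
After the install, confirm health and optionally run the end-to-end connectivity test (note: the test pulls its probe images from the internet, so in an air-gapped cluster those images must be mirrored first):

cilium status --wait
cilium connectivity test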

Disabling kube-proxy

Since Cilium was installed with kubeProxyReplacement=strict, kube-proxy is redundant. The DaemonSet's labels and selector are immutable (see the validation errors preserved in the editor header below), so instead point its nodeSelector at a value no node carries:

kubectl edit ds kube-proxy -n kube-system

# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
# daemonsets.apps "kube-proxy" was not valid:
# * spec.template.metadata.labels: Invalid value: map[string]string{"k8s-app":"kube-proxy111"}: `selector` does not match template `labels`
# * spec.template.metadata.labels: Invalid value: map[string]string{"k8s-app":"kube-proxy111"}: `selector` does not match template `labels`
# * spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"k8s-app":"kube-proxy222"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
#
apiVersion: apps/v1
kind: DaemonSet
metadata:
  annotations:
    deprecated.daemonset.template.generation: "1"
  creationTimestamp: "2023-12-06T10:06:52Z"
  generation: 1
  labels:
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
  resourceVersion: "6503058"
  uid: 62ab2752-74eb-47f6-ae56-ef44fff48b72
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kube-proxy
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: kube-proxy
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf
        - --hostname-override=$(NODE_NAME)
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.1
        imagePullPolicy: IfNotPresent
        name: kube-proxy
        resources: {}
        securityContext:
          privileged: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux   # change this value to one no node has, e.g. linux111, so kube-proxy is no longer scheduled
      priorityClassName: system-node-critical
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: kube-proxy
      serviceAccountName: kube-proxy
      terminationGracePeriodSeconds: 30
      tolerations:
      - operator: Exists
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
          type: ""
        name: lib-modules
  updateStrategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
status:
  currentNumberScheduled: 6
  desiredNumberScheduled: 6
  numberAvailable: 6
  numberMisscheduled: 0
  numberReady: 6
  observedGeneration: 1
  updatedNumberScheduled: 6
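
The same nodeSelector change can also be applied non-interactively; a sketch using a strategic merge patch (linux111 is just an arbitrary non-matching value). Once no node matches, the DaemonSet controller deletes the running kube-proxy pods:

kubectl -n kube-system patch ds kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linux111"}}}}}'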

Testing

kubectl create deploy demoapp --image=ikubernetes/demoapp:v1.0 --replicas=3

kubectl create service nodeport demoapp --tcp=80:80
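
Check the rollout and probe the service; the assigned NodePort is read back with jsonpath (run this on any cluster node):

kubectl get pods -l app=demoapp -o wide
NODEPORT=$(kubectl get svc demoapp -o jsonpath='{.spec.ports[0].nodePort}')
curl -s http://127.0.0.1:${NODEPORT}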
