您的位置:首页 > 文旅 > 旅游 > 建站推荐_优化什么建立生育支持政策体系_微博营销软件_网站建设开发

建站推荐_优化什么建立生育支持政策体系_微博营销软件_网站建设开发

2025/4/4 23:09:08 来源:https://blog.csdn.net/qq_70531838/article/details/143226733  浏览:    关键词:建站推荐_优化什么建立生育支持政策体系_微博营销软件_网站建设开发
建站推荐_优化什么建立生育支持政策体系_微博营销软件_网站建设开发

官网:  安装 K8s 集群 | Sealos: 专为云原生开发打造的以 K8s 为内核的云操作系统

1、sealos工具下载

二进制自动下载

# Resolve the latest sealos release tag from the GitHub API.
# Uses $( ) instead of legacy backticks, and quotes the URL/expansions.
VERSION=$(curl -s https://api.github.com/repos/labring/sealos/releases/latest | grep -oE '"tag_name": "[^"]+"' | head -n1 | cut -d'"' -f4)

# Fetch the official install script through the ghproxy mirror and run it.
# "${VERSION}" is quoted so an empty/odd value cannot word-split the command.
curl -sfL "https://mirror.ghproxy.com/https://raw.githubusercontent.com/labring/sealos/main/scripts/install.sh" \
  | PROXY_PREFIX=https://mirror.ghproxy.com sh -s "${VERSION}" labring/sealos
 

rpm源管理安装

[root@k8s-master1 ~]# uname -m      #检查架构
x86_64
 

# Create the labring yum repository definition.
# BUG FIX: the original `sudo cat > file << EOF` fails for non-root users,
# because the `>` redirection is performed by the *current* (unprivileged)
# shell before sudo ever runs — only `cat` was elevated, not the write.
# `sudo tee` performs the write itself with root privileges.
# The 'EOF' delimiter is quoted so the repo body is taken literally.
sudo tee /etc/yum.repos.d/labring.repo > /dev/null << 'EOF'
[fury]
name=labring Yum Repo
baseurl=https://yum.fury.io/labring/
enabled=1
gpgcheck=0
EOF

# Refresh metadata and install sealos from the new repo.
sudo yum clean all
sudo yum install sealos

2、 k8s环境部署

官方建议:

K8s 的小版本号越高,集群越稳定。例如 v1.28.x,其中的 x 就是小版本号。建议使用小版本号比较高的 K8s 版本。到本文截止时间为止,v1.27 最高的版本号是 v1.27.7,而 v1.28 最高的版本号是 v1.28.3;由于 v1.27.7 的小版本号(7)高于 v1.28.3 的小版本号(3),意味着它经过了更多轮修复、更加稳定,所以建议使用 v1.27.7。你需要根据实际情况来选择最佳的 K8s 版本。

个人踩坑:

因为本次使用的 CNI 是 cilium,它对内核版本的要求较高,因此需要先升级内核(内核主版本号一般在 5 以上即可)。

[root@k8s-master1 ~]# uname -r
5.4.278-1.el7.elrepo.x86_64

安装 K8s 单机版

# sealos version must >= v4.1.0
$ sealos run registry.cn-shanghai.aliyuncs.com/labring/kubernetes:v1.27.7 registry.cn-shanghai.aliyuncs.com/labring/helm:v3.9.4 registry.cn-shanghai.aliyuncs.com/labring/cilium:v1.13.4 --single

安装 K8s 集群

#执行之前各节点记得做免密钥

$ sealos run registry.cn-shanghai.aliyuncs.com/labring/kubernetes:v1.27.7 registry.cn-shanghai.aliyuncs.com/labring/helm:v3.9.4 registry.cn-shanghai.aliyuncs.com/labring/cilium:v1.13.4 \
     --masters 192.168.64.2,192.168.64.22,192.168.64.20 \
     --nodes 192.168.64.21,192.168.64.19 -p [your-ssh-passwd]

集群

[root@k8s-master1 ~]# kubectl get all -A
NAMESPACE     NAME                                      READY   STATUS    RESTARTS      AGE
kube-system   pod/cilium-5ts7s                          1/1     Running   0             24m
kube-system   pod/cilium-9m2tj                          1/1     Running   0             24m
kube-system   pod/cilium-hvsnp                          1/1     Running   0             24m
kube-system   pod/cilium-mplw9                          1/1     Running   0             24m
kube-system   pod/cilium-operator-86666d88cb-77p6r      1/1     Running   0             24m
kube-system   pod/cilium-tk8ks                          1/1     Running   0             24m
kube-system   pod/coredns-5d78c9869d-h6vrm              1/1     Running   0             25m
kube-system   pod/coredns-5d78c9869d-v762n              1/1     Running   0             25m
kube-system   pod/etcd-k8s-master1                      1/1     Running   0             25m
kube-system   pod/etcd-k8s-master2                      1/1     Running   0             25m
kube-system   pod/etcd-k8s-master3                      1/1     Running   0             24m
kube-system   pod/kube-apiserver-k8s-master1            1/1     Running   0             25m
kube-system   pod/kube-apiserver-k8s-master2            1/1     Running   0             25m
kube-system   pod/kube-apiserver-k8s-master3            1/1     Running   0             24m
kube-system   pod/kube-controller-manager-k8s-master1   1/1     Running   1 (25m ago)   25m
kube-system   pod/kube-controller-manager-k8s-master2   1/1     Running   0             25m
kube-system   pod/kube-controller-manager-k8s-master3   1/1     Running   0             24m
kube-system   pod/kube-proxy-4sp89                      1/1     Running   0             25m
kube-system   pod/kube-proxy-ccn7q                      1/1     Running   0             24m
kube-system   pod/kube-proxy-j2h9w                      1/1     Running   0             24m
kube-system   pod/kube-proxy-pd6tj                      1/1     Running   0             24m
kube-system   pod/kube-proxy-tjgx7                      1/1     Running   0             25m
kube-system   pod/kube-scheduler-k8s-master1            1/1     Running   1 (25m ago)   25m
kube-system   pod/kube-scheduler-k8s-master2            1/1     Running   0             25m
kube-system   pod/kube-scheduler-k8s-master3            1/1     Running   0             24m
kube-system   pod/kube-sealos-lvscare-k8s-node1         1/1     Running   0             24m
kube-system   pod/kube-sealos-lvscare-k8s-node2         1/1     Running   0             24m

NAMESPACE     NAME                  TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes    ClusterIP   10.96.0.1     <none>        443/TCP                  25m
kube-system   service/hubble-peer   ClusterIP   10.96.2.203   <none>        443/TCP                  24m
kube-system   service/kube-dns      ClusterIP   10.96.0.10    <none>        53/UDP,53/TCP,9153/TCP   25m

NAMESPACE     NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-system   daemonset.apps/cilium       5         5         5       5            5           kubernetes.io/os=linux   24m
kube-system   daemonset.apps/kube-proxy   5         5         5       5            5           kubernetes.io/os=linux   25m

NAMESPACE     NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
kube-system   deployment.apps/cilium-operator   1/1     1            1           24m
kube-system   deployment.apps/coredns           2/2     2            2           25m

NAMESPACE     NAME                                         DESIRED   CURRENT   READY   AGE
kube-system   replicaset.apps/cilium-operator-86666d88cb   1         1         1       24m
kube-system   replicaset.apps/coredns-5d78c9869d           2         2         2       25m
 

报错记录: 

Error: cluster status is not ClusterSuccess

sealos reset

panic: arch /root/.bashrc: line 13: kubectl: command not found
x86_64 not yet supported, feel free to file an issue

goroutine 1 [running]:
github.com/labring/sealos/pkg/apply.getHostArch.func1({0xc0005a2ec0?, 0xc0000134e8?})
        github.com/labring/sealos/pkg/apply/utils.go:104 +0x174
github.com/labring/sealos/pkg/apply.GetHostArch({0x44c4cb0?, 0xc0000134e8?}, {0xc0005a2ec0, 0x10})
        github.com/labring/sealos/pkg/apply/utils.go:113 +0x3f
github.com/labring/sealos/pkg/apply.(*ClusterArgs).runArgs(0xc00083fc80, 0x40082a7?, 0xc0008ff440, {0xc0005ad440, 0x3, 0x4})
        github.com/labring/sealos/pkg/apply/run.go:140 +0x705
github.com/labring/sealos/pkg/apply.NewApplierFromArgs(0xc000177800, 0xc0008ff440, {0xc0005ad440, 0x3, 0x4})
        github.com/labring/sealos/pkg/apply/run.go:72 +0x48c
github.com/labring/sealos/cmd/sealos/cmd.newRunCmd.func1(0xc000177800?, {0xc000388bd0?, 0x9?, 0x9?})
        github.com/labring/sealos/cmd/sealos/cmd/run.go:73 +0x65
github.com/spf13/cobra.(*Command).execute(0xc000177800, {0xc000388b40, 0x9, 0x9})
        github.com/spf13/cobra@v1.7.0/command.go:940 +0x862
github.com/spf13/cobra.(*Command).ExecuteC(0x5c6d560)
        github.com/spf13/cobra@v1.7.0/command.go:1068 +0x3bd
github.com/spf13/cobra.(*Command).Execute(...)
        github.com/spf13/cobra@v1.7.0/command.go:992
github.com/labring/sealos/cmd/sealos/cmd.Execute()
        github.com/labring/sealos/cmd/sealos/cmd/root.go:49 +0x25
main.main()
        github.com/labring/sealos/cmd/sealos/main.go:27 +0x29

解决方法:检查各个节点的 /root/.bashrc 文件,删除其中第 13 行调用 kubectl 的语句(即报错 "kubectl: command not found" 的那一行),再重新执行 sealos。

# Optional: download a kubectl client matching the cluster version.
# The version is now overridable via the KUBECTL_VERSION env var
# (defaults to v1.27.7, the K8s version installed above).
KUBECTL_VERSION=${KUBECTL_VERSION:-v1.27.7}
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
# 'install' copies, sets owner and the 0755 mode in one atomic step,
# replacing the separate chmod + mv of the original.
sudo install -o root -g root -m 0755 kubectl /usr/bin/kubectl

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com