您的位置:首页 > 科技 > 能源 > 软件开发工具手机版_北京王府井美食_建网站软件_南宁seo关键词排名

软件开发工具手机版_北京王府井美食_建网站软件_南宁seo关键词排名

2025/2/24 7:50:37 来源:https://blog.csdn.net/m0_56363537/article/details/144027159  浏览:    关键词:软件开发工具手机版_北京王府井美食_建网站软件_南宁seo关键词排名
软件开发工具手机版_北京王府井美食_建网站软件_南宁seo关键词排名

k8s

    • 环境初始化
    • 安装Harbor
    • 安装k8s
    • 安装istio和kubevirt

使用脚本部署k8s 1.25版本平台,网络插件使用flannel,容器运行时使用containerd(命令行工具ctr),部署包括harbor仓库、服务网格istio、kubevirt服务等

使用的centos7.9资源配置如下:

主机IP资源
master192.168.200.1004C_8G_100G
node192.168.200.1014C_8G_100G

环境初始化

两边节点修改主机名

# Give each node its role name (run the matching line on that node only).
hostnamectl set-hostname master
hostnamectl set-hostname node
# Append static name resolution for both cluster nodes (run on both nodes).
cat >> /etc/hosts <<eof
192.168.200.100 master
192.168.200.101 node
eof

master节点配置centos源

# Mount the CentOS install media (CD-ROM) to use as a local yum repository.
# -p makes the command idempotent if the directory already exists.
mkdir -p /opt/centos
# Fixed: the original had a stray double slash (//opt/centos/).
mount /dev/sr0 /opt/centos/
# Drop the stock repo definitions; only the local repos below will be used.
rm -rf /etc/yum.repos.d/*

配置repo仓库

# Create the yum repo file pointing at the locally mounted media.
vi /etc/yum.repos.d/local.repo
# --- contents of /etc/yum.repos.d/local.repo ---
[local]
name=local
gpgcheck=0
enabled=1
baseurl=file:///opt/centos
[k8s]
name=k8s
gpgcheck=0
enabled=1
baseurl=file:///opt/kubernetes-repo

安装Harbor

# Mount the offline package ISO and copy its contents into /opt.
mount kubernetes_v2.1.iso /mnt/
cp -rf /mnt/* /opt/
cd /opt/

# ---- k8s_harbor_install.sh ----
#!/bin/bash
# Node initialization + Docker + Harbor registry installation.
# NOTE(review): reconstructed from a source whose newlines were collapsed;
# logic is unchanged, only line structure and comments were restored.

# --- Passwordless SSH to every host listed in /etc/hosts ---
ALL_SERVER_ROOT_PASSWORD=000000
all_hosts=$(cat /etc/hosts | awk '{print $1}' | sed '/::1/d' | sort -u)
all_hostname=$(cat /etc/hosts | awk '{print $2}' | sort -u)
a_hosts="$all_hosts $all_hostname"
my_ip=$(ip a | grep -w "inet" | awk '{print $2}' | sed 's/\/.*//g')
other_ip=$all_hosts
# Remove our own addresses from the target list.
for i in $my_ip; do other_ip=$(echo $other_ip | sed "s/$i//g"); done

yum install -y expect
# Generate a key pair only if one does not already exist.
if [[ ! -s ~/.ssh/id_rsa.pub ]]; then
  ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi
for hosts in $a_hosts; do
  ping $hosts -c 4 >> /dev/null 2>&1
  if [ 0 -ne $? ]; then
    echo -e "\033[31mWarning\n$hosts IP unreachable!\033[0m"
  fi
  # Drive ssh-copy-id non-interactively (answers the yes/no and password prompts).
  expect -c "set timeout -1;spawn ssh-copy-id  -i /root/.ssh/id_rsa  $hosts ;expect {*(yes/no)* {send -- yes\r;exp_continue;}*assword:* {send -- $ALL_SERVER_ROOT_PASSWORD\r;exp_continue;}eof        {exit 0;}}"
done

# --- Time sync: this host acts as the NTP server for the cluster ---
IP=$(ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1)
yum install -y chrony
# Comment out the default pool servers and insert ourselves as the server.
sed -i '3,6s/^/#/g' /etc/chrony.conf
sed -i "7s|^|server $IP iburst|g" /etc/chrony.conf
echo "allow all" >> /etc/chrony.conf
echo "local stratum 10" >> /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd
timedatectl set-ntp true
sleep 5
systemctl restart chronyd
chronyc sources

# --- Disable firewall and SELinux (kubeadm preflight requirements) ---
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# --- Disable swap (kubelet refuses to start with swap on) ---
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

echo -e "nameserver 114.114.114.114" > /etc/resolv.conf

# --- Kernel modules and sysctls for container networking ---
echo -e "overlay\nbr_netfilter" > /etc/modules-load.d/containerd.conf
modprobe -- overlay
modprobe -- br_netfilter
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf

# --- Install Docker CE ---
yum install -y yum-utils device-mapper-persistent-data lvm2
yum install -y docker-ce
systemctl enable docker
systemctl start docker

# --- Docker daemon: systemd cgroup driver + allow the insecure local registry ---
tee /etc/docker/daemon.json <<EOF
{"insecure-registries" : ["0.0.0.0/0"],"registry-mirrors": ["https://d8b3zdiw.mirror.aliyuncs.com"],"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker

# --- Install docker-compose from the offline bundle ---
chmod +x /opt/docker-compose/v2.10.2-docker-compose-linux-x86_64
mv /opt/docker-compose/v2.10.2-docker-compose-linux-x86_64 /usr/local/bin/docker-compose

# --- Load the offline image tarballs ---
for i in $(ls /opt/images | grep tar); do
  docker load -i /opt/images/$i
done

# --- Install the Harbor registry ---
IP=$(ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1)
cd /opt/harbor/
tar -zxvf harbor-offline-installer-v2.5.3.tgz
cd harbor
mv harbor.yml.tmpl harbor.yml
# Line 5: hostname -> our IP; lines 13, 15-18: comment out the HTTPS section.
sed -i "5s/reg.mydomain.com/${IP}/g" harbor.yml
sed -i "13s/^/#/g" harbor.yml
sed -i "15,18s/^/#/g" harbor.yml
docker load -i harbor.v2.5.3.tar.gz
./prepare || exit
./install.sh || exit
sleep 5
docker-compose ps
echo "请在浏览器通过http://${IP}访问Harbor"
[root@localhost opt]# ./k8s_harbor_install.sh

搭建完成后界面IP访问Harbor:admin/Harbor12345

导入镜像

#!/bin/bash
#--------------------------------------------
## Image upload notes
# Create the "library" project in the registry first.
# Adjust the private registry address below to your environment.
#--------------------------------------------

# --- Logging helper: timestamped line to stdout and a dated log file ---
workdir=$(pwd)
log_file=${workdir}/sync_images_$(date +"%Y-%m-%d").log

logger()
{
  log=$1
  cur_time='['$(date +"%Y-%m-%d %H:%M:%S")']'
  echo ${cur_time} ${log} | tee -a ${log_file}
}

# Prompt for registry address/credentials until the user confirms with Y/y.
images_hub() {
  while true; do
    read -p "输入镜像仓库地址(不加http/https): " registry
    read -p "输入镜像仓库用户名: " registry_user
    read -p "输入镜像仓库用户密码: " registry_password
    echo "您设置的仓库地址为: ${registry},用户名: ${registry_user},密码: xxx"
    read -p "是否确认(Y/N): " confirm
    # Fixed: the original test `[ $confirm != Y ] && [ $confirm != y ] && [ $confirm == '' ]`
    # errored on empty input and accepted any non-empty answer; re-prompt unless Y/y.
    if [[ "$confirm" != "Y" && "$confirm" != "y" ]]; then
      echo "输入不能为空,重新输入"
    else
      break
    fi
  done
}

images_hub

echo "镜像仓库 $(docker login -u ${registry_user} -p ${registry_password} ${registry})"
# All local images except headers and Harbor's own goharbor/* images, as name:tag.
images=$(docker images -a | grep -v TAG | grep -v goharbor | awk '{print $1 ":" $2}')
#images=$(cat library-images.txt )

# Global project: when set, every image is pushed into this single project.
global_namespace=library

docker_push() {
  for imgs in $( echo "${images}" ); do
    if [[ -n "$global_namespace" ]]; then
      # n = number of '/' in the image name; determines its registry layout.
      n=$(echo ${imgs} | awk -F"/" '{print NF-1}')
      if [ ${n} -eq 0 ]; then
        # No '/': a library image; push as-is under the global project.
        img_tag=${imgs}
        docker tag ${imgs} ${registry}/${global_namespace}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${global_namespace}/${img_tag}
      elif [ ${n} -eq 1 ]; then
        # One '/': project/name:tag — keep only name:tag.
        img_tag=$(echo ${imgs} | awk -F"/" '{print $2}')
        docker tag ${imgs} ${registry}/${global_namespace}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${global_namespace}/${img_tag}
      elif [ ${n} -eq 2 ]; then
        # Two '/': registry/project/name:tag — keep only name:tag.
        img_tag=$(echo ${imgs} | awk -F"/" '{print $3}')
        docker tag ${imgs} ${registry}/${global_namespace}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${global_namespace}/${img_tag}
      else
        # More than three components is not a valid image reference.
        echo "No available images"
      fi
    else
      # No global project: preserve each image's own project namespace.
      n=$(echo ${imgs} | awk -F"/" '{print NF-1}')
      if [ ${n} -eq 0 ]; then
        img_tag=${imgs}
        namespace_1=library
        namespace_2=library
        docker tag ${imgs} ${registry}/${namespace_1}/${img_tag}
        docker tag ${imgs} ${registry}/${namespace_2}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${namespace_1}/${img_tag}
        docker push ${registry}/${namespace_2}/${img_tag}
      elif [ ${n} -eq 1 ]; then
        img_tag=$(echo ${imgs} | awk -F"/" '{print $2}')
        namespace=$(echo ${imgs} | awk -F"/" '{print $1}')
        docker tag ${imgs} ${registry}/${namespace}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${namespace}/${img_tag}
      elif [ ${n} -eq 2 ]; then
        img_tag=$(echo ${imgs} | awk -F"/" '{print $3}')
        namespace=$(echo ${imgs} | awk -F"/" '{print $2}')
        docker tag ${imgs} ${registry}/${namespace}/${img_tag}
        #docker rmi ${imgs}
        docker push ${registry}/${namespace}/${img_tag}
      else
        echo "No available images"
      fi
    fi
  done
}
docker_push
[root@localhost opt]# ./k8s_image_push.sh
输入镜像仓库地址(不加http/https): 192.168.200.100
输入镜像仓库用户名: admin
输入镜像仓库用户密码: Harbor12345
您设置的仓库地址为: 192.168.200.100,用户名: admin,密码: xxx
是否确认(Y/N): Y

安装k8s

[root@localhost opt]# cat k8s_master_install.sh
#!/bin/bash
# k8s_master_install.sh — install kubeadm/kubelet/kubectl 1.25.0, configure
# containerd as the runtime (pointing at the local Harbor registry), init the
# control plane, then deploy flannel and the Kubernetes dashboard.
# NOTE(review): reconstructed from a source with collapsed newlines.

# --- Kubernetes packages ---
yum install -y kubeadm-1.25.0 kubelet-1.25.0 kubectl-1.25.0
systemctl enable kubelet
systemctl start kubelet
docker -v
kubelet --version

# --- Runtime tooling: crictl + containerd configuration ---
IP=$(ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1)
tar -zxvf /opt/cri/crictl-v1.25.0-linux-amd64.tar.gz -C /usr/local/bin/
containerd config default > /etc/containerd/config.toml
# Use the systemd cgroup driver and pull the pause image from local Harbor.
sed -ri -e 's/(.*SystemdCgroup = ).*/\1true/' -e "s@(.*sandbox_image = ).*@\1\'$IP/library/pause:3.8\'@" /etc/containerd/config.toml
# Register mirrors plus insecure/auth config for the local Harbor registry.
# Fixed: the original contained the mangled key `insecure_skMASTER_IP_verify`;
# the containerd registry option is `insecure_skip_verify`.
sed -i -e "/.*registry.mirrors.*/a\        [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n          endpoint = [\"https://registry.docker-cn.com\" ,\"http://hub-mirror.c.163.com\" ,\"https://docker.mirrors.ustc.edu.cn\"]\n        [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"$IP\"]\n          endpoint = [\"http://$IP\"]" -e "/.*registry.configs.*/a\        [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"$IP\".tls]\n          insecure_skip_verify = true\n        [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"$IP\".auth]\n          username = \"admin\"\n          password = \"Harbor12345\"" /etc/containerd/config.toml
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# --- nerdctl (docker-like CLI for containerd) ---
tar -zxvf /opt/cri/nerdctl-0.23.0-linux-amd64.tar.gz -C /usr/local/bin/
mkdir -p /etc/nerdctl/
cat > /etc/nerdctl/nerdctl.toml <<EOF
namespace      = "k8s.io"
insecure_registry = true
EOF

# --- buildkit (image build daemon) ---
tar -zxvf /opt/cri/buildkit-v0.10.4.linux-amd64.tar.gz -C /usr/local/
cat > /usr/lib/systemd/system/buildkit.socket <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Socket]
ListenStream=%t/buildkit/buildkitd.sock
SocketMode=0660

[Install]
WantedBy=sockets.target
EOF
cat > /usr/lib/systemd/system/buildkit.service << EOF
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit

[Service]
Type=notify
ExecStart=/usr/local/bin/buildkitd --addr fd://

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl enable buildkit && systemctl start buildkit
systemctl daemon-reload && systemctl restart containerd && systemctl enable --now containerd
# Sanity-check every runtime tool, then bounce Harbor so it reconnects cleanly.
ctr version && crictl version && runc --version && buildctl --version && nerdctl version
cd /opt/harbor/harbor && docker-compose restart &> /dev/null && cd ~
nerdctl login -u admin -pHarbor12345 $IP

# --- Initialize the control plane, pulling images from the local Harbor ---
kubeadm init --kubernetes-version=1.25.0 --apiserver-advertise-address=$IP --image-repository $IP/library --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 50
kubectl get pod -n kube-system -owide

# --- Deploy the flannel CNI (images rewritten to the local registry) ---
# Fixed: the original wrapped this sed in `eval` to force $IP expansion inside
# single quotes; double quotes achieve the same without eval.
sed -i "s@docker.io/flannel@$IP/library@g" /opt/yaml/flannel/kube-flannel.yaml
kubectl apply -f /opt/yaml/flannel/kube-flannel.yaml
sleep 20

# --- Deploy the dashboard with a self-signed certificate ---
mkdir /opt/dashboard-certs
cd /opt/dashboard-certs/
kubectl create namespace kubernetes-dashboard
openssl genrsa -out dashboard.key 2048
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
sed -i "s/kubernetesui/$IP\/library/g" /opt/yaml/dashboard/recommended.yaml
kubectl apply -f /opt/yaml/dashboard/recommended.yaml
kubectl apply -f /opt/yaml/dashboard/dashadmin-user.yaml

# --- Allow scheduling on the control-plane node (remove the taint) ---
kubectl taint nodes master node-role.kubernetes.io/control-plane-

# --- Print login info ---
token=$(kubectl -n kubernetes-dashboard create token admin-user)
echo ""
echo ""
echo ""
echo "dashboard地址:https://$IP:30001"
echo "登录令牌:$token"
[root@localhost opt]# ./k8s_master_install.sh

将node脚本传给node节点

# Copy the worker setup script to the node host for later execution there.
scp /opt/k8s_node_install.sh node:/root/

配置vsftp

# Serve /opt via anonymous FTP so the node can consume it as a yum baseurl.
yum install -y vsftpd
echo "anon_root=/opt" >> /etc/vsftpd/vsftpd.conf
systemctl restart vsftpd

node节点配置环境

# On the node: drop stock repos and point yum at the master's FTP share.
rm -rf /etc/yum.repos.d/*
vi /etc/yum.repos.d/local.repo
# --- contents of /etc/yum.repos.d/local.repo ---
[centos]
name=centos
gpgcheck=0
enabled=1
baseurl=ftp://192.168.200.100/centos
[k8s]
name=k8s
gpgcheck=0
enabled=1
baseurl=ftp://192.168.200.100/kubernetes-repo

node节点加入k8s集群

[root@localhost ~]# ls
anaconda-ks.cfg  k8s_node_install.sh
[root@localhost ~]# ./k8s_node_install.sh
[root@master opt]# kubectl get nodes
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   14m     v1.25.0
node     Ready    <none>          4m30s   v1.25.0
[root@master opt]# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
kube-flannel           kube-flannel-ds-275nd                        1/1     Running   0          4m48s
kube-flannel           kube-flannel-ds-jlkz7                        1/1     Running   0          14m
kube-system            coredns-76bf7f8764-gcggn                     1/1     Running   0          14m
kube-system            coredns-76bf7f8764-qwz69                     1/1     Running   0          14m
kube-system            etcd-master                                  1/1     Running   0          15m
kube-system            kube-apiserver-master                        1/1     Running   0          15m
kube-system            kube-controller-manager-master               1/1     Running   0          15m
kube-system            kube-proxy-5jvkl                             1/1     Running   0          14m
kube-system            kube-proxy-f9k7p                             1/1     Running   0          4m48s
kube-system            kube-scheduler-master                        1/1     Running   0          15m
kubernetes-dashboard   dashboard-metrics-scraper-7b645c4f85-q858n   1/1     Running   0          13m
kubernetes-dashboard   kubernetes-dashboard-568f4844dc-2tlhx        1/1     Running   0          13m

安装istio和kubevirt

#!/bin/bash
# k8s_project_install.sh — optionally deploy istio and kubevirt on the cluster.
# NOTE(review): reconstructed from a source with collapsed newlines.

#### Deploy istio ####
install_istio(){
    nerdctl load -i /opt/project/images/istio_image.tar
    tar -zxvf /opt/project/istio/istio-1.17.2-linux-amd64.tar.gz -C /opt/project/istio
    chmod +x /opt/project/istio/istio-1.17.2/bin/istioctl
    cp /opt/project/istio/istio-1.17.2/bin/istioctl /usr/local/bin/
    # Restart one coredns pod so it picks up the new registry images/config.
    kubectl delete po -n kube-system $(kubectl get po -n kube-system | grep coredns | head -n 1 | awk '{print $1}')
    istioctl install --set profile=demo -y
    kubectl apply -f /opt/project/istio/istio-1.17.2/samples/addons/
    sleep 15
    kubectl get all -n istio-system
}

#### Deploy kubevirt ####
install_kubevirt(){
    yum install -y qemu-kvm libvirt virt-install bridge-utils
    nerdctl load -i /opt/project/images/kubevirt_image.tar
    kubectl apply -f /opt/project/kubevirt/deploy/kubevirt-operator.yaml
    kubectl apply -f /opt/project/kubevirt/deploy/kubevirt-cr.yaml
    kubectl apply -f /opt/project/kubevirt/deploy/multus-daemonset.yaml
    kubectl apply -f /opt/project/kubevirt/deploy/multus-cni-macvlan.yaml
    chmod +x /opt/project/kubevirt/tools/virtctl-v0.41.0-linux-amd64
    cp /opt/project/kubevirt/tools/virtctl-v0.41.0-linux-amd64 /usr/local/bin/virtctl
    sleep 15
    kubectl get all -n kubevirt
}

## Ask the operator whether to proceed
read -p "是否安装配置istio和kubevirt(Y/N): " answer
case "$answer" in
    y|Y)
        install_istio
        install_kubevirt
        ;;
    n|N)
        exit 0
        ;;
    *)
        echo "请输入正确的选项"
        exit 1
        ;;
esac
[root@master opt]# ./k8s_project_install.sh
是否安装配置istio和kubevirt(Y/N): Y
[root@master opt]# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
istio-system           grafana-56bdf8bf85-g86cz                     1/1     Running   0          2m47s
istio-system           istio-egressgateway-85649899f8-clwmg         1/1     Running   0          4m9s
istio-system           istio-ingressgateway-f56888458-24ttk         1/1     Running   0          4m9s
istio-system           istiod-64848b6c78-64zqf                      1/1     Running   0          4m13s
istio-system           jaeger-76cd7c7566-46hvk                      1/1     Running   0          2m47s
istio-system           kiali-646db7568f-h9q6h                       1/1     Running   0          2m47s
istio-system           prometheus-85949fddb-k4ndc                   2/2     Running   0          2m47s
kube-flannel           kube-flannel-ds-275nd                        1/1     Running   0          11m
kube-flannel           kube-flannel-ds-jlkz7                        1/1     Running   0          20m
kube-system            coredns-76bf7f8764-qwz69                     1/1     Running   0          21m
kube-system            coredns-76bf7f8764-rkhk5                     1/1     Running   0          4m21s
kube-system            etcd-master                                  1/1     Running   0          21m
kube-system            kube-apiserver-master                        1/1     Running   0          21m
kube-system            kube-controller-manager-master               1/1     Running   0          21m
kube-system            kube-multus-ds-pjckn                         1/1     Running   0          113s
kube-system            kube-multus-ds-qn7lv                         1/1     Running   0          113s
kube-system            kube-proxy-5jvkl                             1/1     Running   0          21m
kube-system            kube-proxy-f9k7p                             1/1     Running   0          11m
kube-system            kube-scheduler-master                        1/1     Running   0          21m
kubernetes-dashboard   dashboard-metrics-scraper-7b645c4f85-q858n   1/1     Running   0          20m
kubernetes-dashboard   kubernetes-dashboard-568f4844dc-2tlhx        1/1     Running   0          20m
kubevirt               virt-api-6c4f849c9d-nntf6                    1/1     Running   0          90s
kubevirt               virt-api-6c4f849c9d-q78kk                    1/1     Running   0          90s
kubevirt               virt-controller-67b95d99d5-bhpts             1/1     Running   0          59s
kubevirt               virt-controller-67b95d99d5-xspc7             1/1     Running   0          59s
kubevirt               virt-handler-ptsk8                           1/1     Running   0          59s
kubevirt               virt-handler-vs5kh                           1/1     Running   0          59s
kubevirt               virt-operator-798f64bdf6-4t4nc               1/1     Running   0          116s
kubevirt               virt-operator-798f64bdf6-cqqr6               1/1     Running   0          116s

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com