您的位置:首页 > 教育 > 锐评 > 大庆加油app老版本_公司起名字大全免费查询_b站2023年免费入口_每日关键词搜索排行

大庆加油app老版本_公司起名字大全免费查询_b站2023年免费入口_每日关键词搜索排行

2024/10/7 4:25:56 来源:https://blog.csdn.net/weixin_70754025/article/details/142183875  浏览:    关键词:大庆加油app老版本_公司起名字大全免费查询_b站2023年免费入口_每日关键词搜索排行
大庆加油app老版本_公司起名字大全免费查询_b站2023年免费入口_每日关键词搜索排行

9.12 k8s

calico的部署

# lsanaconda-ks.cfg  k8s-ha-install  kubeadm-config.yaml  new.yaml  token# 切换 git 分⽀[root@k8s-master ~]# cd k8s-ha-install/[root@k8s-master k8s-ha-install]# git checkout manual-installation-v1.28.x && cd calico/分支 manual-installation-v1.28.x 设置为跟踪来自 origin 的远程分支 manual-installation-v1.28.x。切换到一个新分支 'manual-installation-v1.28.x'[root@k8s-master calico]# lscalico.yaml[root@k8s-master calico]# pwd/root/k8s-ha-install/calico[root@k8s-master calico]# cat ~/new.yaml | grep SubpodSubnet: 172.16.0.0/16serviceSubnet: 10.96.0.0/16[root@k8s-master calico]# vim calico.yaml •# 修改配置文件,将文件中的POD_CIDR替换成172.16.0.0/16 4801               value: "172.16.0.0/16"•[root@k8s-master calico]# kubectl get po -ANAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGEkube-system   coredns-6554b8b87f-m5wnb             0/1     Pending   0          94mkube-system   coredns-6554b8b87f-zz9cb             0/1     Pending   0          94mkube-system   etcd-k8s-master                      1/1     Running   0          94mkube-system   kube-apiserver-k8s-master            1/1     Running   0          94mkube-system   kube-controller-manager-k8s-master   1/1     Running   0          94mkube-system   kube-proxy-gtt6v                     1/1     Running   0          94mkube-system   kube-proxy-snr8v                     1/1     Running   0          59mkube-system   kube-proxy-z5hrs                     1/1     Running   0          59mkube-system   kube-scheduler-k8s-master            1/1     Running   0          94m•# 创建pod[root@k8s-master calico]# kubectl apply -f calico.yaml•# 查看日志[root@k8s-master calico]# kubectl logs calico-node-9jp9m -n kube-system•# 出现问题就去节点查看日志[root@k8s-node01 ~]# vim /var/log/messages

更新并重新启动,三台机器

# yum -y update
# reboot

查看容器和节点状态就差不多好了

# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   19h   v1.28.2
node1    Ready    <none>          19h   v1.28.2
node2    Ready    <none>          19h   v1.28.2
# kubectl get po -A
NAMESPACE     NAME                                       READY   ST
kube-system   calico-kube-controllers-6d48795585-hm9q7   1/1     Ru
kube-system   calico-node-jcg6z                          1/1     Ru
kube-system   calico-node-kpjnw                          1/1     Ru
kube-system   calico-node-wkkcb                          1/1     Ru
kube-system   coredns-6554b8b87f-5lt5x                   1/1     Ru
kube-system   coredns-6554b8b87f-dqx6t                   1/1     Ru
kube-system   etcd-master                                1/1     Ru
kube-system   kube-apiserver-master                      1/1     Ru
kube-system   kube-controller-manager-master             1/1     Ru
kube-system   kube-proxy-5rwvt                           1/1     Ru
kube-system   kube-proxy-5x555                           1/1     Ru
kube-system   kube-proxy-g79tw                           1/1     Ru
kube-system   kube-scheduler-master                      1/1     

创建节点

 # 添加一个新的pod[root@k8s-master calico]# kubectl run nginx0 --image=nginxpod/nginx0 created•[root@k8s-master calico]# kubectl get po -Aowide|grep nginx•# 查看日志[root@k8s-master calico]# kubectl logs nginx0Error from server (BadRequest): container "nginx0" in pod "nginx0" is waiting to start: trying and failing to pull image

删除节点

 [root@k8s-master calico]# kubectl delete pod nginx0pod "nginx0" deleted[root@k8s-master calico]# kubectl get po -Aowide|grep nginx

Metrics 部署

复制证书到所有节点

# scp /etc/kubernetes/pki/front-proxy-ca.crt  node1:/etc/kubernetes
The authenticity of host 'node1 (192.168.1.12)' can't be establishe
ECDSA key fingerprint is SHA256:donghBpnwWMN6JmjNdCNwYJP179r2qC20tk
ECDSA key fingerprint is MD5:ec:83:ce:f2:5b:6c:ee:2a:04:80:86:48:ad
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1' (ECDSA) to the list of known hos
front-proxy-ca.crt                                                 
您在 /var/spool/mail/root 中有新邮件
# scp /etc/kubernetes/pki/front-proxy-ca.crt  node2:
The authenticity of host 'node2 (192.168.1.13)' can't be establishe
ECDSA key fingerprint is SHA256:donghBpnwWMN6JmjNdCNwYJP179r2qC20tk
ECDSA key fingerprint is MD5:ec:83:ce:f2:5b:6c:ee:2a:04:80:86:48:ad
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2' (ECDSA) to the list of known hos
front-proxy-ca.crt  

安装metrics server

 [root@k8s-master ~]# ls components.yaml components.yaml[root@k8s-master ~]# mkdir pods[root@k8s-master ~]# mv components.yaml pods/[root@k8s-master ~]# cd pods/[root@k8s-master pods]# lscomponents.yaml[root@k8s-master pods]# cat components.yaml | wc -l202•# 添加metric server的pod资源[root@k8s-master pods]# kubectl create -f components.yaml •# 在kube-system命名空间下查看metrics server的pod运⾏状态[root@k8s-master pods]# kubectl get po -A|grep metricskube-system   metrics-server-79776b6d54-dmwk6            1/1     Running   0             2m26s

查看节点资源监控

 # 查看node节点的系统资源使⽤情况[root@k8s-master pods]# kubectl top nodesNAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   k8s-master   151m         7%     1099Mi          63%       k8s-node01   40m          4%     467Mi           53%       k8s-node02   39m          3%     483Mi           55%       [root@k8s-master pods]# kubectl top pods -A

搭建dashboard

1、安装dashboard

--cd /root/k8s-ha-install/dashboard

--ls

dashboard-user.yaml dashboard.yaml

--kubectl create -f .

2、设置svc模式

--kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

..

type: NodePort

..

--kubectl get svc kubernetes-dashboard -n kubernetes-dashboard

浏览器访问

3、获得token

--kubectl create token admin-user -n kube-system

eyJhbGciOiJSUzI1NiIsImtpZCI6ImhvcW5UMVFUQzhtamNrcHEyWnFVV3R0aGMtTFRfOF9GeEFOdVVOeS11c2MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MTI1Mjk5LCJpYXQiOjE3MjYxMjE2OTksImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiYzJlYWI4ZTgtYTMyMC00NTI4LTgyOGYtMzk5NmNmZjkxODU1In19LCJuYmYiOjE3MjYxMjE2OTksInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.NpPA6L4XeXIDIZbm8aPVYYXLDSwEZvjhaz_urNbW-12y9CqHc4h66PDOhUPr1v0qqIXPOHA9jHF25EwGDk3QtNmtV5-MR8Te-n7rV-K_oM1QZNFvsQiit9nFlbvu7FuxxkyY_YjfW1IhWf1KuEsln_XOHGRHTMwxKN8xKUqFNjZTAc8UMKTp0hLEsf9Mi0oxxfHnd93tjxjyDhUDGxdFZOd2YNZGA-EWaPMuRcc5PdW3-5FIXUK12HZB7XT-X7R8uxhpboZuoO60Rxh-HPcz_mhNElAr0pDlzBcQeISVbqS5RaAtnKKuNEF5oouCifcMwCvtD137Hsuysn3379vZQg

添加更新

--kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"$(date +'%s')\"}}}}}" -n kube-system

访问测试

--curl 127.0.0.1:10249/proxyMode
ipvs

验证节点

--kubectl get nodeNAME STATUS ROLES AGE VERSIONmaster Ready control-plane 23h v1.28.2node1 Ready <none> 22h v1.28.2node2 Ready <none> 22h v1.28.2

查看服务的网段

--kubectl get svcNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEkubernetes ClusterIP 10.96.0.1 <none> 443/TCP 23h

查看service pod宿主机的网段

--kubectl get svc

--kubectl get po -Aowide

测试创建参数

--kubectl create deploy cluster-test --image=registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools -- sleep 3600

访问dns的443端口和53端口

--curl -k https://10.96.0.1:443

--curl http://10.96.0.10:53

#测试创建参数
[root@master ~]# kubectl create deploy cluster-test1 --image=registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools -- sleep 3600
deployment.apps/cluster-test1 created
您在 /var/spool/mail/root 中有新邮件
[root@master ~]# kubectl get po -A|grep cluster-test1
default                cluster-test1-54575cf56c-92grp               1/1     Running            0                7s
​
#进入创建的节点中
[root@master ~]# kubectl  exec -it cluster-test1-54575cf56c-92grp -- bash
(07:29 cluster-test1-54575cf56c-92grp:/) ifconfig
eth0      Link encap:Ethernet  HWaddr f6:21:45:f6:45:29  inet addr:172.16.104.8  Bcast:0.0.0.0  Mask:255.255.255.255inet6 addr: fe80::f421:45ff:fef6:4529/64 Scope:LinkUP BROADCAST RUNNING MULTICAST  MTU:1480  Metric:1RX packets:5 errors:0 dropped:0 overruns:0 frame:0TX packets:8 errors:0 dropped:0 overruns:0 carrier:0collisions:0 txqueuelen:1000 RX bytes:446 (446.0 B)  TX bytes:656 (656.0 B)
​
lo        Link encap:Local Loopback  inet addr:127.0.0.1  Mask:255.0.0.0inet6 addr: ::1/128 Scope:HostUP LOOPBACK RUNNING  MTU:65536  Metric:1RX packets:0 errors:0 dropped:0 overruns:0 frame:0TX packets:0 errors:0 dropped:0 overruns:0 carrier:0collisions:0 txqueuelen:1000 RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
​
​
(1 07:29 cluster-test1-54575cf56c-92grp:/) nslookup kubernetes
Server:     10.96.0.10
Address:    10.96.0.10#53
​
Name:   kubernetes.default.svc.cluster.local
Address: 10.96.0.1
​
(07:30 cluster-test1-54575cf56c-92grp:/) nslookup kube-dns.kube-system
Server:     10.96.0.10
Address:    10.96.0.10#53
​
Name:   kube-dns.kube-system.svc.cluster.local
Address: 10.96.0.10
​
(07:30 cluster-test1-54575cf56c-92grp:/) exit
exit
您在 /var/spool/mail/root 中有新邮件
​
​
#访问dns的443端口和53端口
[root@master ~]# curl -k https://10.96.0.1:443
{"kind": "Status","apiVersion": "v1","metadata": {},"status": "Failure","message": "forbidden: User \"system:anonymous\" cannot get path \"/\"","reason": "Forbidden","details": {},"code": 403
}
[root@master ~]# curl  http://10.96.0.10:53
curl: (52) Empty reply from server
​

kubernetes自动补齐

常用指令

1、自动补齐

--yum -y install bash-completion  # 安装自动补齐

--source <(kubectl completion bash)

创建节点 [root@k8s-master ~]# kubectl run nginx1 --image nginx pod/nginx1 created [root@k8s-master ~]# kubectl get po -A ​删除节点

[root@k8s-master ~]# kubectl delete pod nginx1 pod "nginx1" deleted

--echo "source <(kubectl completion bash)" >> ~/.bashrc  # 写入 ~/.bashrc,每次登录自动生效

2、基础指令

 # 删除节点[root@k8s-master ~]# kubectl delete pod cluster-test-64b7b9cbf-jjmmhpod "cluster-test-64b7b9cbf-jjmmh" deleted•# 节点还在[root@k8s-master ~]# kubectl get po -A|grep cluster-testdefault                cluster-test-64b7b9cbf-dnn2m                 0/1     ContainerCreating   0               20sdefault                cluster-test0-58689d5d5d-qr4mv               1/1     Running             0               34m•# 使用deployment删除[root@k8s-master ~]# kubectl delete deployment cluster-testdeployment.apps "cluster-test" deleted•# 已删除[root@k8s-master ~]# kubectl get po -A|grep cluster-test

编写yaml文件-创建节点

# vim pods/abc.yamlapiVersion: v1kind: Podmetadata:name: busybox-sleepspec:containers:- name: busyboximage: busybox:1.28args:- sleep- "1000"[root@k8s-master ~]# cd pods/[root@k8s-master pods]# lsabc.yaml  components.yaml[root@k8s-master pods]# kubectl create -f abc.yaml [root@k8s-master pods]# kubectl create -f abc.yaml pod/busybox-sleep created[root@k8s-master pods]# kubectl get po -A|grep busybox-sleepdefault                busybox-sleep                                1/1     Running   0               3s[root@k8s-master pods]# kubectl delete pod busybox-sleeppod "busybox-sleep" deleted[root@k8s-master pods]# kubectl get po -A|grep busy

编写json文件

# vim pods/abc.json
 {
      "apiVersion":"v1",
      "kind":"Pod",
      "metadata":{
          "name":"busybox-sleep000"
      },
      "spec":{
          "containers":[
              {
                  "name":"busybox000",
                  "image":"busybox:1.28",
                  "args":[
                      "sleep",
                      "1000"
                  ]
              }
          ]
      }
  }

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com