- name: Stop if memory is too small for masters
  assert:
    that: ansible_memtotal_mb >= 1500
  ignore_errors: "{{ ignore_assert_errors }}"
  when: inventory_hostname in groups['kube-master']

- name: Stop if memory is too small for nodes
  assert:
    that: ansible_memtotal_mb >= 1024
  ignore_errors: "{{ ignore_assert_errors }}"
  when: inventory_hostname in groups['kube-node']
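These two assertions come from Kubespray's roles/kubernetes/preinstall/tasks/verify-settings.yml and simply compare the ansible_memtotal_mb fact against fixed thresholds. To see the value Ansible will test on each host before running the playbook, you can query that fact with an ad-hoc command; a quick sketch, assuming the inventory is saved as inventory/hosts.ini (that path is an assumption, substitute your own):

# show total memory (in MB) as Ansible sees it on every host
ansible -i inventory/hosts.ini all -m setup -a 'filter=ansible_memtotal_mb'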
# Configure 'ip' variable to bind kubernetes services on a
# different ip than the default iface
# hostname       ssh login IP                   ssh user               ssh password           node IP            netmask
kube-master-0 ansible_ssh_host=172.16.94.140 ansible_ssh_user=root ansible_ssh_pass=123 ip=172.16.94.140 mask=/24
kube-node-41 ansible_ssh_host=172.16.94.141 ansible_ssh_user=root ansible_ssh_pass=123 ip=172.16.94.141 mask=/24
kube-node-42 ansible_ssh_host=172.16.94.142 ansible_ssh_user=root ansible_ssh_pass=123 ip=172.16.94.142 mask=/24
# configure a bastion host if your nodes are not directly reachable
# bastion ansible_ssh_host=x.x.x.x

[kube-master]
kube-master-0

[etcd]
kube-master-0

[kube-node]
kube-node-41
kube-node-42

[k8s-cluster:children]
kube-node
kube-master

[calico-rr]
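With the inventory in place, the cluster itself is deployed by running Kubespray's cluster.yml playbook against it. A minimal sketch, assuming the inventory above is saved as inventory/hosts.ini under the kubespray checkout (the filename is an assumption; the checkout path matches the one that appears in the logs further down):

cd /root/gopath/src/kubespray
# -b escalates to root on the target hosts, -v prints per-task results
ansible-playbook -i inventory/hosts.ini cluster.yml -b -v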
# kubectl get all --namespace=kube-system
NAME             DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
ds/calico-node   3         3         3       3            3           <none>          2h

NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns               2         2         2            2           2h
deploy/kubedns-autoscaler     1         1         1            1           2h
deploy/kubernetes-dashboard   1         1         1            1           2h

NAME                                 DESIRED   CURRENT   READY   AGE
rs/kube-dns-79d99cdcd5               2         2         2       2h
rs/kubedns-autoscaler-5564b5585f     1         1         1       2h
rs/kubernetes-dashboard-69cb58d748   1         1         1       2h

NAME                                       READY   STATUS    RESTARTS   AGE
po/calico-node-22vsg                       1/1     Running   0          2h
po/calico-node-t7zgw                       1/1     Running   0          2h
po/calico-node-zqnx8                       1/1     Running   0          2h
po/kube-apiserver-kube-master-0            1/1     Running   0          22h
po/kube-controller-manager-kube-master-0   1/1     Running   0          2h
po/kube-dns-79d99cdcd5-f2t6t               3/3     Running   0          2h
po/kube-dns-79d99cdcd5-gw944               3/3     Running   0          2h
po/kube-proxy-kube-master-0                1/1     Running   2          22h
po/kube-proxy-kube-node-41                 1/1     Running   3          22h
po/kube-proxy-kube-node-42                 1/1     Running   3          22h
po/kube-scheduler-kube-master-0            1/1     Running   0          2h
po/kubedns-autoscaler-5564b5585f-lt9bb     1/1     Running   0          2h
po/kubernetes-dashboard-69cb58d748-wmb9x   1/1     Running   0          2h
po/nginx-proxy-kube-node-41                1/1     Running   3          22h
po/nginx-proxy-kube-node-42                1/1     Running   3          22h

NAME                       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns               ClusterIP   10.233.0.3     <none>        53/UDP,53/TCP   2h
svc/kubernetes-dashboard   ClusterIP   10.233.27.24   <none>        443/TCP         2h
2. k8s node information
# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
kube-master-0   Ready    master   22h   v1.9.5
kube-node-41    Ready    node     22h   v1.9.5
kube-node-42    Ready    node     22h   v1.9.5
3. Component health information
# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
kube-master-0   Ready    master   1d    v1.9.5
kube-node-41    Ready    node     1d    v1.9.5
kube-node-42    Ready    node     1d    v1.9.5
kube-node-43    Ready    node     1m    v1.9.5    # this is the newly added node
As the output shows, the newly added kube-node-43 node has joined the cluster and the scale-out is complete.
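For reference, a scale-out like this is normally done by appending the new host to the [kube-node] group in the inventory and re-running Kubespray with its scale.yml playbook; a hedged sketch, reusing the assumed inventory/hosts.ini path from above (--limit restricts the run to the new node):

cd /root/gopath/src/kubespray
# after adding kube-node-43 to the [kube-node] group in the inventory:
ansible-playbook -i inventory/hosts.ini scale.yml -b -v --limit kube-node-43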
3. k8s component information
# kubectl get po --namespace=kube-system -o wide
NAME                                    READY   STATUS    RESTARTS   AGE   IP               NODE
calico-node-22vsg                       1/1     Running   0          10h   172.16.94.140    kube-master-0
calico-node-8fz9x                       1/1     Running   2          27m   172.16.94.143    kube-node-43
calico-node-t7zgw                       1/1     Running   0          10h   172.16.94.142    kube-node-42
calico-node-zqnx8                       1/1     Running   0          10h   172.16.94.141    kube-node-41
kube-apiserver-kube-master-0            1/1     Running   0          1d    172.16.94.140    kube-master-0
kube-controller-manager-kube-master-0   1/1     Running   0          10h   172.16.94.140    kube-master-0
kube-dns-79d99cdcd5-f2t6t               3/3     Running   0          10h   10.233.100.194   kube-node-41
kube-dns-79d99cdcd5-gw944               3/3     Running   0          10h   10.233.107.1     kube-node-42
kube-proxy-kube-master-0                1/1     Running   2          1d    172.16.94.140    kube-master-0
kube-proxy-kube-node-41                 1/1     Running   3          1d    172.16.94.141    kube-node-41
kube-proxy-kube-node-42                 1/1     Running   3          1d    172.16.94.142    kube-node-42
kube-proxy-kube-node-43                 1/1     Running   0          26m   172.16.94.143    kube-node-43
kube-scheduler-kube-master-0            1/1     Running   0          10h   172.16.94.140    kube-master-0
kubedns-autoscaler-5564b5585f-lt9bb     1/1     Running   0          10h   10.233.100.193   kube-node-41
kubernetes-dashboard-69cb58d748-wmb9x   1/1     Running   0          10h   10.233.107.2     kube-node-42
nginx-proxy-kube-node-41                1/1     Running   3          1d    172.16.94.141    kube-node-41
nginx-proxy-kube-node-42                1/1     Running   3          1d    172.16.94.142    kube-node-42
nginx-proxy-kube-node-43                1/1     Running   0          26m   172.16.94.143    kube-node-43
TASK [kubernetes/preinstall : Stop if memory is too small for masters] *********************************************************************************************************************************************************************************************************
task path: /root/gopath/src/kubespray/roles/kubernetes/preinstall/tasks/verify-settings.yml:52
Friday 10 August 2018  21:50:26 +0800 (0:00:00.940)       0:01:14.088 *********
fatal: [kube-master-0]: FAILED! => {"assertion": "ansible_memtotal_mb >= 1500", "changed": false, "evaluated_to": false}

TASK [kubernetes/preinstall : Stop if memory is too small for nodes] ***********************************************************************************************************************************************************************************************************
task path: /root/gopath/src/kubespray/roles/kubernetes/preinstall/tasks/verify-settings.yml:58
Friday 10 August 2018  21:50:27 +0800 (0:00:00.570)       0:01:14.659 *********
fatal: [kube-node-41]: FAILED! => {"assertion": "ansible_memtotal_mb >= 1024", "changed": false, "evaluated_to": false}
fatal: [kube-node-42]: FAILED! => {"assertion": "ansible_memtotal_mb >= 1024", "changed": false, "evaluated_to": false}

to retry, use: --limit @/root/gopath/src/kubespray/cluster.retry
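The failure is simply the assertions shown earlier tripping because these lab VMs have less than the required 1500 MB (masters) / 1024 MB (nodes) of RAM. Besides giving the VMs more memory, the check can be downgraded to a warning through the ignore_assert_errors variable that those tasks already reference; a sketch, again assuming the inventory/hosts.ini path:

# re-run with the memory assertions treated as warnings instead of hard failures
ansible-playbook -i inventory/hosts.ini cluster.yml -b -v -e ignore_assert_errors=yes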
failed: [k8s-node-1] (item={u'name': u'docker-engine-1.13.1-1.el7.centos'}) => {"attempts": 4, "changed": false, ..., "item": {"name": "docker-engine-1.13.1-1.el7.centos"}, "msg": "Error: docker-ce-selinux conflicts with 2:container-selinux-2.66-1.el7.noarch\n", "rc": 1, "results": [
  "Loaded plugins: fastestmirror\nLoading mirror speeds from cached hostfile\n * elrepo: mirrors.tuna.tsinghua.edu.cn\n * epel: mirrors.tongji.edu.cn\nPackage docker-engine is obsoleted by docker-ce, trying to install docker-ce-17.03.2.ce-1.el7.centos.x86_64 instead\nResolving Dependencies\n--> Running transaction check\n---> Package docker-ce.x86_64 0:17.03.2.ce-1.el7.centos will be installed\n--> Processing Dependency: docker-ce-selinux >= 17.03.2.ce-1.el7.centos for package: docker-ce-17.03.2.ce-1.el7.centos.x86_64\n--> Processing Dependency: libltdl.so.7()(64bit) for package: docker-ce-17.03.2.ce-1.el7.centos.x86_64\n--> Running transaction check\n---> Package docker-ce-selinux.noarch 0:17.03.2.ce-1.el7.centos will be installed\n---> Package libtool-ltdl.x86_64 0:2.4.2-22.el7_3 will be installed\n--> Processing Conflict: docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch conflicts docker-selinux\n--> Restarting Dependency Resolution with new changes.\n--> Running transaction check\n---> Package container-selinux.noarch 2:2.55-1.el7 will be updated\n---> Package container-selinux.noarch 2:2.66-1.el7 will be an update\n--> Processing Conflict: docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch conflicts docker-selinux\n--> Finished Dependency Resolution\n You could try using --skip-broken to work around the problem\n You could try running: rpm -Va --nofiles --nodigest\n"
]}
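The root cause is a yum conflict: the docker-ce-selinux package that docker-ce 17.03 pulls in conflicts with the docker-selinux/container-selinux policy already present on the node. One commonly reported workaround (an assumption about this environment, so check what is installed before removing anything) is to clear the conflicting policy packages on the failing node, install the exact versions the transaction above was trying to resolve, and then re-run the playbook:

# on k8s-node-1: drop the conflicting SELinux policy packages (verify dependents first)
yum remove -y docker-selinux container-selinux
# install the matching docker-ce pieces explicitly, preventing obsoletes substitution
yum install -y --setopt=obsoletes=0 \
    docker-ce-selinux-17.03.2.ce-1.el7.centos \
    docker-ce-17.03.2.ce-1.el7.centos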