=====================================================================================================================================================
 Package                     Arch        Version                      Repository    Size
=====================================================================================================================================================
Installing:
 ntp                         x86_64      4.2.6p5-29.el7.centos        base         548 k
Installing for dependencies:
 autogen-libopts             x86_64      5.18-5.el7                   base          66 k
 ntpdate                     x86_64      4.2.6p5-29.el7.centos        base          86 k
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server ntp.aliyun.com iburst
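Because slave56 and slave57 will later sync their clocks from master55, the master's /etc/ntp.conf usually also needs a restrict rule that allows LAN clients to query it. The line below is an assumption (it is not visible in the capture) based on the 192.168.1.0/24 addresses used throughout this cluster:

# Allow clients on the 192.168.1.0/24 LAN to query time from this host (assumed, not in the capture)
restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap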
[root@master55 /]#
[root@master55 /]# sudo systemctl start ntpd
[root@master55 /]# systemctl enable ntpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
[root@localhost /]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*203.107.6.88    10.137.38.86     2 u   37   64    1   53.476   -5.668   2.224
[root@master55 /]#
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server master55.xincan.cn iburst
[root@slave56 /]#
[root@slave56 /]# sudo systemctl start ntpd
[root@slave56 /]# systemctl enable ntpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
[root@slave56 /]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*master55.xincan 203.107.6.88     3 u   12   64    1    0.367   10.659   0.054
[root@slave56 /]#
Configure slave57
[root@slave57 /]# vi /etc/ntp.conf
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server master55.xincan.cn iburst
[root@slave57 /]#
[root@slave57 /]# sudo systemctl start ntpd
[root@slave57 /]# systemctl enable ntpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
[root@slave57 /]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*master55.xincan 203.107.6.88     3 u   12   64    1    0.367   10.659   0.054
[root@slave57 /]#
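If a slave's clock is far off, ntpd can take a long time to converge. A common optional step, not shown in the captures above, is a one-shot sync against the master before starting ntpd (ntpdate was installed as a dependency in the yum output earlier); a minimal sketch:

# Optional one-shot sync against master55 before letting ntpd take over
systemctl stop ntpd 2>/dev/null
ntpdate -u master55.xincan.cn
systemctl start ntpd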
[root@master55 /]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:I0PR5fMj01uGGb1Z3pbRFjzwTIFb2ONyZ1M2I9OVTNY root@master55.xincan.cn
The key's randomart image is:
+---[RSA 2048]----+
|       .. .. .X**|
|         ... *=%E|
|        . o . B=X|
|         . + * X*|
|      o S o * Bo*|
|       o . o = . |
|            .    |
|                 |
|                 |
+----[SHA256]-----+
[root@master55 /]#
Append the public key to authorized_keys
[root@master55 /]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[root@master55 /]# cd ~/.ssh/
[root@master55 .ssh]# ll
total 12
-rw-r--r-- 1 root root  405 Jun 16 17:03 authorized_keys
-rw------- 1 root root 1675 Jun 16 17:01 id_rsa
-rw-r--r-- 1 root root  405 Jun 16 17:01 id_rsa.pub
[root@master55 .ssh]#
[root@master55 .ssh]# scp -r ~/.ssh/ root@slave56:~/.ssh/
The authenticity of host 'slave56 (192.168.1.56)' can't be established.
ECDSA key fingerprint is SHA256:KhL6Vyv6q5fHHcZ3+xoLn6W/mZ7SBAFD+n/TCXEHtSM.
ECDSA key fingerprint is MD5:71:35:87:3d:ff:73:04:fc:d7:a2:07:30:68:b8:62:5b.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave56,192.168.1.56' (ECDSA) to the list of known hosts.
root@slave56's password:
id_rsa                                       100% 1675     1.1MB/s   00:00
id_rsa.pub                                   100%  405   282.4KB/s   00:00
authorized_keys                              100%  405   277.0KB/s   00:00
known_hosts                                  100%  182   104.6KB/s   00:00
[root@master55 .ssh]# scp -r ~/.ssh/ root@slave57:~/.ssh/
The authenticity of host 'slave57 (192.168.1.57)' can't be established.
ECDSA key fingerprint is SHA256:Gfz+xXR217Yb2ZWOIMsRzSe+iynRvpxLnt98cI4kBRA.
ECDSA key fingerprint is MD5:8b:1d:cd:1d:24:79:de:80:c3:53:7c:d3:87:e0:d4:96.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave57,192.168.1.57' (ECDSA) to the list of known hosts.
root@slave57's password:
id_rsa                                       100% 1675     1.0MB/s   00:00
id_rsa.pub                                   100%  405   304.6KB/s   00:00
authorized_keys                              100%  405   352.7KB/s   00:00
known_hosts                                  100%  364   271.2KB/s   00:00
[root@master55 .ssh]#
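Copying the whole ~/.ssh directory works here only because the slave nodes have no keys of their own yet. An equivalent, less destructive alternative (not what the capture shows) is ssh-copy-id, which appends only the public key to each slave's authorized_keys:

# Alternative to the scp above: push just the public key to each slave
ssh-copy-id -i ~/.ssh/id_rsa.pub root@slave56
ssh-copy-id -i ~/.ssh/id_rsa.pub root@slave57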
Verify passwordless SSH login among the three nodes master55, slave56, and slave57
Verification on master55
[root@master55 /]# ssh root@slave56
Last login: Tue Jun 16 15:11:10 2020 from 192.168.1.182
[root@slave56 ~]# exit
logout
Connection to slave56 closed.
[root@master55 /]# ssh root@slave57
Last login: Tue Jun 16 15:11:23 2020 from 192.168.1.182
[root@slave57 ~]# exit
logout
Connection to slave57 closed.
[root@master55 /]#
[root@slave56 ~]# ssh root@master55
The authenticity of host 'master55 (192.168.1.55)' can't be established.
ECDSA key fingerprint is SHA256:Dv4+42UAUC3FCEqZjwxJECtUHMgAYUtD2UsRASyffFw.
ECDSA key fingerprint is MD5:fe:0b:32:39:20:9c:e1:3e:67:b7:3d:42:a1:22:df:2a.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'master55,192.168.1.55' (ECDSA) to the list of known hosts.
Last login: Tue Jun 16 15:58:21 2020 from 192.168.1.182
[root@master55 ~]# exit
logout
Connection to master55 closed.
[root@slave56 ~]# ssh root@master55
Last login: Tue Jun 16 17:17:38 2020 from 192.168.1.56
[root@master55 ~]# exit
logout
Connection to master55 closed.
[root@slave56 ~]# ssh root@slave57
The authenticity of host 'slave57 (192.168.1.57)' can't be established.
ECDSA key fingerprint is SHA256:Gfz+xXR217Yb2ZWOIMsRzSe+iynRvpxLnt98cI4kBRA.
ECDSA key fingerprint is MD5:8b:1d:cd:1d:24:79:de:80:c3:53:7c:d3:87:e0:d4:96.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave57,192.168.1.57' (ECDSA) to the list of known hosts.
Last login: Tue Jun 16 17:15:27 2020 from 192.168.1.55
[root@slave57 ~]# exit
logout
Connection to slave57 closed.
[root@slave56 ~]# ssh root@slave57
Last login: Tue Jun 16 17:17:59 2020 from 192.168.1.56
[root@slave57 ~]# exit
logout
Connection to slave57 closed.
[root@slave56 ~]#
[root@slave57 /]# ssh root@master55
The authenticity of host 'master55 (192.168.1.55)' can't be established.
ECDSA key fingerprint is SHA256:Dv4+42UAUC3FCEqZjwxJECtUHMgAYUtD2UsRASyffFw.
ECDSA key fingerprint is MD5:fe:0b:32:39:20:9c:e1:3e:67:b7:3d:42:a1:22:df:2a.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'master55,192.168.1.55' (ECDSA) to the list of known hosts.
Last login: Tue Jun 16 17:17:42 2020 from 192.168.1.56
[root@master55 ~]# exit
logout
Connection to master55 closed.
[root@slave57 yum.repos.d]# ssh root@master55
Last login: Tue Jun 16 17:19:31 2020 from 192.168.1.57
[root@master55 ~]# exit
logout
Connection to master55 closed.
[root@slave57 yum.repos.d]# ssh root@slave56
Last login: Tue Jun 16 17:15:11 2020 from 192.168.1.55
[root@slave56 ~]# exit
logout
Connection to slave56 closed.
[root@slave57 /]#
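The interactive checks above can also be condensed into a quick non-interactive loop run from any node; a small sketch using the hostnames from this cluster:

# Each hop should print the remote hostname without asking for a password
for h in master55 slave56 slave57; do
  ssh -o BatchMode=yes root@$h hostname
done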
[root@master55 /]# sudo kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.2 --apiserver-advertise-address 192.168.1.55 --pod-network-cidr=10.244.0.0/16 --token-ttl 0
W0616 17:24:47.742105    8831 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master55.xincan.cn kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.55]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master55.xincan.cn localhost] and IPs [192.168.1.55 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master55.xincan.cn localhost] and IPs [192.168.1.55 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0616 17:29:47.640484    8831 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0616 17:29:47.646613    8831 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 31.505848 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master55.xincan.cn as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master55.xincan.cn as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 991hr9.scqkkyphn1cjjcl7
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
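The three commands that kubeadm prints immediately after this sentence were trimmed from the capture; they are the standard kubeconfig setup that lets kubectl talk to the new cluster, reproduced here for completeness:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config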
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
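The join command itself was not captured. For this cluster its shape would be as below; the bootstrap token appears in the init output above, but the discovery hash is not recoverable from the log and is left as a placeholder:

# Run on each worker node; replace <hash> with the value from the real kubeadm init output
kubeadm join 192.168.1.55:6443 --token 991hr9.scqkkyphn1cjjcl7 \
    --discovery-token-ca-cert-hash sha256:<hash>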
W0616 17:50:09.914108    4585 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@master55 /]# mkdir k8s
[root@master55 /]# cd k8s/
[root@master55 k8s]# mkdir calico && cd calico
[root@master55 calico]# wget https://kuboard.cn/install-script/calico/calico-3.13.1.yaml
--2020-06-17 17:42:44--  https://kuboard.cn/install-script/calico/calico-3.13.1.yaml
Resolving kuboard.cn (kuboard.cn)... 119.3.92.138, 122.112.240.69
Connecting to kuboard.cn (kuboard.cn)|119.3.92.138|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 21079 (21K) [application/octet-stream]
Saving to: ‘calico-3.13.1.yaml’
100%[====================================================================================================================================================>] 21,079 --.-K/s in 0s
[root@master55 calico]# kubectl apply -f calico-3.13.1.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[root@master55 calico]#
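The Calico pods take a little while to pull images and start; one way to watch them (and the other system pods) come up before checking the node status in the next step:

# Watch kube-system pods until calico-node and calico-kube-controllers are Running
kubectl get pods -n kube-system -w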
16. Check the nodes and pods again
This takes a little while.
Check the nodes; their status is now Ready.
[root@master55 /]# kubectl get nodes
NAME                 STATUS   ROLES    AGE   VERSION
master55.xincan.cn   Ready    master   16h   v1.18.3
slave56.xincan.cn    Ready    <none>   16h   v1.18.3
slave57.xincan.cn    Ready    <none>   16h   v1.18.3
[root@master55 /]#
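The heading also mentions pods; the matching check (across all namespaces, since the system pods live in kube-system) would be:

# CoreDNS, kube-proxy and the calico pods should all be Running
kubectl get pods --all-namespaces -o wide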
[root@master55 prometheus]# find -type f -exec sed -ri 's#k8s.gcr.io#gcr.azk8s.cn/google_containers#' {} \;
[root@master55 prometheus]#
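A quick way to confirm the substitution took effect is to list the image references left in the manifests; a small sketch:

# No k8s.gcr.io entries should remain after the sed above
grep -rh "image:" . | sort -u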
Create the namespace
[root@master55 prometheus]# kubectl apply -f .
namespace/monitoring created
[root@master55 prometheus]#
Install the operator
[root@master55 prometheus]# kubectl apply -f operator/
customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
clusterrole.rbac.authorization.k8s.io/prometheus-operator created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
deployment.apps/prometheus-operator created
service/prometheus-operator created
serviceaccount/prometheus-operator created
[root@master55 prometheus]#
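Before applying the remaining manifests it is worth confirming the operator Deployment is running and its CRDs are registered; a sketch:

# prometheus-operator should reach Running before the CRD-backed resources are applied
kubectl get pods -n monitoring
kubectl get crd | grep monitoring.coreos.com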
[root@xincan /]# cd /k8s/
[root@xincan k8s]# git clone https://github.com/xincan/kubernetes.git
[root@xincan k8s]# ls
calico  kube-prometheus  kubernetes-dashboard  prometheus
[root@master55 k8s]# cd kubernetes-dashboard/
[root@master55 kubernetes-dashboard]# ls
login-token  recommended.yaml
[root@master55 kubernetes-dashboard]# kubectl create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@master55 kubernetes-dashboard]#
## Check the pods and services in the kubernetes-dashboard namespace
[root@master55 kubernetes-dashboard]# kubectl get pod,svc -n kubernetes-dashboard
NAME                                             READY   STATUS              RESTARTS   AGE
pod/dashboard-metrics-scraper-779f5454cb-hzgc4   1/1     ContainerCreating   0          30s
pod/kubernetes-dashboard-857bb4c778-gsf2q        1/1     ContainerCreating   0          30s
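The capture jumps straight to the result of editing the Dashboard Service; the edit itself is not shown. A minimal sketch of that step, assuming the usual kubectl edit/patch approach to expose the Service on the NodePort 30001 that appears in the output below:

# Option A: interactive edit - change "type: ClusterIP" to "type: NodePort"
# and add "nodePort: 30001" under the 443 port entry, then save
kubectl -n kubernetes-dashboard edit svc kubernetes-dashboard

# Option B: the same change non-interactively with a JSON patch
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard --type='json' \
  -p='[{"op":"replace","path":"/spec/type","value":"NodePort"},{"op":"add","path":"/spec/ports/0/nodePort","value":30001}]'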
Edit cancelled, no changes made.
## After the edit, the kubernetes-dashboard Service type is NodePort and the node port is 30001
[root@master55 kubernetes-dashboard]# kubectl get svc -n kubernetes-dashboard kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.110.40.170   <none>        443:30001/TCP   14m
[root@master55 kubernetes-dashboard]#
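Logging in to the Dashboard at https://192.168.1.55:30001 requires a bearer token. The repository's login-token directory presumably holds the manifests for this; if not, a minimal sketch follows (the ServiceAccount name admin-user is illustrative, not taken from the capture):

# Create a ServiceAccount with cluster-admin rights (name is illustrative)
kubectl create serviceaccount admin-user -n kubernetes-dashboard
kubectl create clusterrolebinding admin-user \
  --clusterrole=cluster-admin \
  --serviceaccount=kubernetes-dashboard:admin-user

# Print the bearer token to paste into the Dashboard login page
kubectl -n kubernetes-dashboard describe secret \
  $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')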