# /etc/hosts entries for the cluster: three masters, two worker nodes.
# Append these lines to /etc/hosts on every machine.
192.168.9.171 k8s-master-1
192.168.9.172 k8s-master-2
192.168.9.173 k8s-master-3
192.168.9.174 k8s-node-1
192.168.9.175 k8s-node-2
# Set the static hostname — run exactly ONE matching line per machine,
# not all five on the same host.
hostnamectl --static set-hostname k8s-master-1
hostnamectl --static set-hostname k8s-master-2
hostnamectl --static set-hostname k8s-master-3
hostnamectl --static set-hostname k8s-node-1
hostnamectl --static set-hostname k8s-node-2
# Generate an RSA key pair (interactive: prompts for path and passphrase).
ssh-keygen -t rsa
# Non-interactive alternative: default key path, empty passphrase
# (overwrites an existing ~/.ssh/id_rsa without asking).
ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''
# Copy the public key to every host to enable password-less SSH.
ssh-copy-id 192.168.9.171
ssh-copy-id 192.168.9.172
ssh-copy-id 192.168.9.173
ssh-copy-id 192.168.9.174
ssh-copy-id 192.168.9.175
自动设置免密的脚本 ssh-copy
# Install etcd (run on every etcd node).
yum install etcd -y
修改etcd-csr.json里面的IP
生成etcd的证书,分发到所有节点,包括node节点
# Edit the IPs in etcd-csr.json first, then generate the etcd certificates.
./genCerts.sh
# Distribute the certificates to all five hosts (masters and worker nodes).
./deliver-pem.sh 192.168.9.171
./deliver-pem.sh 192.168.9.172
./deliver-pem.sh 192.168.9.173
./deliver-pem.sh 192.168.9.174
./deliver-pem.sh 192.168.9.175
# Use the etcd v3 API for etcdctl.
export ETCDCTL_API=3
# Verify cluster health over TLS.
# NOTE(review): the original endpoints were 192.168.18.181-183, which matches
# nothing else in this document; normalized to the 192.168.9.171-173 masters
# defined above — confirm where etcd actually runs.
etcdctl --cacert=/etc/etcd/ssl/etcd-root-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.9.171:2379,https://192.168.9.172:2379,https://192.168.9.173:2379 endpoint health
具体etcd的安装配置参考install-etcd项目: https://gitee.com/jack2zhang/install-etcd
bin目录
# Download the Kubernetes v1.9.1 control-plane and node binaries
# from the internal mirror.
for component in kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy; do
  wget "http://test.wgmf.com/mirrors/kubernetes/v191/bin/${component}"
done
# Ship the control-plane binaries to each master, node binaries to each worker.
# NOTE(review): added the explicit ./ prefix for consistency with every other
# helper-script invocation in this document (assumes the scripts live in CWD).
./deliver-bin-master.sh 192.168.9.171
./deliver-bin-master.sh 192.168.9.172
./deliver-bin-master.sh 192.168.9.173
./deliver-bin-node.sh 192.168.9.174
./deliver-bin-node.sh 192.168.9.175
# Work in the certs/kubernetes directory.
certs/kubernetes
改kubernetes-csr.json 文件里面的IP master和node 要分别生成 config
# Edit the IPs in kubernetes-csr.json; masters and nodes get separate configs.
# genCerts.sh — generate the Kubernetes certificates.
genCerts.sh 生成证书
# genTokenKubeConfig.sh — generate the master kubeconfig and bootstrap token.
genTokenKubeConfig.sh 生成master的config和token
# set-kubelet-config.sh — generate the node (kubelet) kubeconfig.
set-kubelet-config.sh 生成node的config
# Distribute the Kubernetes certificates: masters first, then worker nodes.
./deliver-pem-master.sh 192.168.9.171
./deliver-pem-master.sh 192.168.9.172
./deliver-pem-master.sh 192.168.9.173
./deliver-pem-node.sh 192.168.9.174
./deliver-pem-node.sh 192.168.9.175
# Install the control-plane systemd unit files on each master.
./deliver-systemd-master.sh 192.168.9.171
./deliver-systemd-master.sh 192.168.9.172
./deliver-systemd-master.sh 192.168.9.173
kube-apiserver.service kube-controller-manager.service kube-scheduler.service
# Start the control-plane units on the masters.
start-systemd-master.sh
# Verify component status (scheduler / controller-manager / etcd all Healthy).
kubectl get cs
# docker/ directory — Docker installation helper for CentOS 7.
docker/
install_docker4centos7.sh
nginx.conf改成master的IP
部署
# Deploy the local nginx proxy (apiserver load balancer) on each worker node.
# NOTE(review): added the ./ prefix for consistency with the other script calls.
./deploy-nginx.sh 192.168.9.174
./deploy-nginx.sh 192.168.9.175
master上执行授权 创建请求证书
# On a master: authorize the kubelet-bootstrap user to submit certificate
# signing requests (TLS bootstrapping).
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
# Install the node systemd units (kubelet, kube-proxy) on each worker.
# NOTE(review): original used 192.168.18.174/175, inconsistent with the
# 192.168.9.x addressing used everywhere else in this document — normalized.
./deliver-systemd-node.sh 192.168.9.174
./deliver-systemd-node.sh 192.168.9.175
改IP 和 hostname 并启动
# Re-deliver after adjusting IP and hostname in the unit files.
# NOTE(review): original read "/deliver-systemd-node.sh" (absolute path) —
# almost certainly a typo for "./"; also normalized 192.168.18.x → 192.168.9.x.
./deliver-systemd-node.sh 192.168.9.174
./deliver-systemd-node.sh 192.168.9.175
# systemd unit files are installed under:
systemd路径
/etc/systemd/system/
改IP和hostname
下载基础镜像
# Pull the pause base image via the proxy wrapper script.
# NOTE(review): the leading "docker-wrapper" token looks like the directory
# name, with ./docker-wrapper.py being the actual command — confirm.
docker-wrapper ./docker-wrapper.py pull gcr.io/google_containers/pause-amd64:3.0
# ipvsadm is needed for kube-proxy IPVS mode.
yum install ipvsadm -y
启动
# Start and enable kubelet and kube-proxy on each worker node.
systemctl start kubelet.service
systemctl enable kubelet.service
systemctl start kube-proxy.service
systemctl enable kube-proxy.service
# Optional: mark a node unschedulable / schedulable again.
kubectl cordon k8s-node-1
kubectl uncordon k8s-node-1
# List the pending kubelet bootstrap CSRs and approve them all.
kubectl get csr
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
# Nodes should now register and become Ready.
kubectl get nodes
docker conf
# Move the flannel drop-in so docker picks up the flannel-provided
# environment (bridge subnet/MTU) from /etc/systemd/system.
mv /usr/lib/systemd/system/docker.service.d/flannel.conf /etc/systemd/system/docker.service.d
# set-etcdctl-flannel: write the flannel network config into etcd
# (etcd v2 API — note the --cert-file/--ca-file/--key-file flag names).
set-etcdctl-flannel
# NOTE(review): removed a stray "\ " before the JSON value (it escaped a space,
# injecting a spurious extra argument between key and value) and normalized the
# endpoints from 192.168.18.x to this document's 192.168.9.x masters — confirm.
etcdctl --endpoints=https://192.168.9.171:2379,https://192.168.9.172:2379,https://192.168.9.173:2379 --cert-file=/etc/etcd/ssl/etcd.pem --ca-file=/etc/etcd/ssl/etcd-root-ca.pem --key-file=/etc/etcd/ssl/etcd-key.pem set /flannel/network/config '{"Network":"10.254.64.0/18","SubnetLen":24,"Backend":{"Type":"host-gw"}}'
# Flanneld configuration file contents for:
/etc/sysconfig/flanneld
# NOTE(review): endpoints normalized from 192.168.18.x to the 192.168.9.x
# masters used throughout this document — confirm etcd host list.
FLANNEL_ETCD_ENDPOINTS="https://192.168.9.171:2379,https://192.168.9.172:2379,https://192.168.9.173:2379"
FLANNEL_ETCD_PREFIX="/flannel/network"
FLANNEL_OPTIONS="-ip-masq=true -etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem -etcd-certfile=/etc/etcd/ssl/etcd.pem -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem -iface=eth0"
重新设置docker
# Reconfigure docker for Kubernetes networking (script in the docker/ dir).
docker/ set-docker-kubernetes.sh
# socat is required by kubelet for kubectl port-forward.
yum install socat -y
# Pull the helm tiller image through the proxy wrapper, then install helm.
# NOTE(review): the original line contained an invisible U+FE0F character
# inside "docker-wrapper.py" (which would break the command) and both lines
# lacked the ./ prefix used elsewhere — fixed.
./docker-wrapper.py pull gcr.io/kubernetes-helm/tiller:v2.14.0
./install-helm.sh
# coredns/ — render the CoreDNS manifest for service CIDR 10.254.0.0/18 with
# cluster DNS IP 10.254.0.2 and apply it.
coredns/ exam.sh ./deploy.sh -r 10.254.0.0/18 -i 10.254.0.2 | kubectl apply -f -
# CoreDNS pods should appear in kube-system.
kubectl get pods -n kube-system
# testcore/ — launch a throwaway alpine pod for DNS testing.
testcore/ kubectl apply -f alpine.yaml
test
# Smoke-test service DNS resolution from inside the alpine pod.
kubectl get pods,svc
# NOTE(review): modern kubectl requires "--" before the command, e.g.
# "kubectl exec -it alpine -- nslookup nginx-svc"; the bare form was valid
# for the v1.9-era kubectl this document targets.
kubectl exec -it alpine nslookup nginx-svc
kubectl exec -it alpine nslookup kubernetes
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。