代码拉取完成,页面将自动刷新
CentOS7版本部署K8S集群
kubeadm是官方社区推出的一个用于快速部署kubernetes集群的工具
三台主机
IP: 192.168.101.2 主机名:master 系统: centos 7.6 配置: 2C 2G
IP: 192.168.101.3 主机名:node1 系统: centos 7.6 配置: 2C 2G
IP: 192.168.101.4 主机名:node2 系统: centos 7.6 配置: 2C 2G
更改所有节点的主机名,便于区分(各自执行):
master节点: hostnamectl set-hostname k8s-master
node1节点: hostnamectl set-hostname k8s-node1
node2节点: hostnamectl set-hostname k8s-node2
bash
# Show the current hostname to confirm the change took effect
hostname
# Disable firewalld (kubeadm needs open node-to-node ports; in production,
# prefer explicit firewall rules over disabling the firewall entirely).
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux: permanently in the config file, then immediately for the
# running system.
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
# Disable swap: the kubelet refuses to start while swap is enabled.
swapoff -a  # temporary — lasts until reboot
# Permanent: comment out the swap line in /etc/fstab.
# (BUG FIX: the original had the instruction text itself on a command line,
# which executed as a broken `cat` invocation — it is a comment now.)
sed -i 's/.*swap.*/#&/g' /etc/fstab
# Map the cluster node IPs to their hostnames on every machine so the nodes
# can resolve each other without DNS.
printf '%s\n' \
  '192.168.101.2 k8s-master' \
  '192.168.101.3 k8s-node1' \
  '192.168.101.4 k8s-node2' >> /etc/hosts
# Synchronize the clock; TLS certificate validation during cluster bootstrap
# breaks when node clocks drift.
yum -y install ntpdate
ntpdate ntp.api.bz
# Let iptables see bridged traffic — required by kube-proxy and most CNI
# plugins (flannel included).
printf '%s\n' \
  'net.bridge.bridge-nf-call-ip6tables = 1' \
  'net.bridge.bridge-nf-call-iptables = 1' > /etc/sysctl.d/k8s.conf
# Load every sysctl configuration file, including the one just written.
sysctl --system
# Install Docker CE 19.03 from the Aliyun mirror repository.
yum -y install wget
wget -O /etc/yum.repos.d/docker-ce.repo \
  https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce-19.03.12-3.el7
# Start Docker now and enable it at boot.
systemctl start docker
systemctl enable docker
# Configure the Docker daemon: Aliyun registry mirror, systemd cgroup driver
# (must match the kubelet's driver), bounded json-file logging, and overlay2
# storage. The quoted delimiter keeps the content literal.
cat <<'EOF' > /etc/docker/daemon.json
{
"registry-mirrors": ["https://6ze43vnb.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
# Reload unit definitions and restart Docker so daemon.json takes effect.
systemctl daemon-reload
systemctl restart docker
# Add the Aliyun mirror of the Kubernetes yum repository (GPG checks are off;
# the gpgkey URLs are kept for reference).
printf '%s\n' \
  '[kubernetes]' \
  'name=Kubernetes' \
  'baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64' \
  'enabled=1' \
  'gpgcheck=0' \
  'repo_gpgcheck=0' \
  'gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg' \
  > /etc/yum.repos.d/kubernetes.repo
# Install the pinned 1.18.6 toolchain and enable the kubelet at boot.
yum install -y kubelet-1.18.6 kubeadm-1.18.6 kubectl-1.18.6
systemctl enable kubelet
# List the container images this kubeadm version needs
kubeadm config images list
# Outside mainland China: pull them directly from k8s.gcr.io
kubeadm config images pull
# --- or ---
# Inside mainland China: pull from the Aliyun mirror instead (optional here,
# since the `kubeadm init` step below already uses the Aliyun image repo):
# Pull the control-plane images from the Aliyun mirror and re-tag them with
# the k8s.gcr.io names kubeadm expects.
# (The stray `kubeadm config` invocation that used to precede this loop only
# printed usage text and did nothing — removed.)
images=(
  kube-apiserver:v1.18.6
  kube-controller-manager:v1.18.6
  kube-scheduler:v1.18.6
  kube-proxy:v1.18.6
  pause:3.2
  etcd:3.4.3-0
  coredns:1.6.7
)
for imageName in "${images[@]}"; do
  docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
  # BUG FIX: the original tag command referenced the undefined variable
  # ${image}, producing a malformed source reference — it must be ${imageName}.
  docker tag "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}" \
    "k8s.gcr.io/${imageName}"
done
----------------------
[root@k8s-master ~]# docker images
------------------------
# Bootstrap the control plane on the master node.
#   --apiserver-advertise-address : this master's own IP
#   --image-repository            : Aliyun mirror, avoids pulling from k8s.gcr.io
#   --kubernetes-version          : must match the installed kubelet/kubeadm (1.18.6)
#   --service-cidr                : virtual IP range for Services
#   --pod-network-cidr            : 10.244.0.0/16 matches flannel's default config
kubeadm init \
--apiserver-advertise-address=192.168.101.2 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.6 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
# 执行成功会输出下面的数据,将下面的数据拷贝到从节点执行(每次都不一样根据自己实际生成的为准,这个是node节点加入集群使用)
kubeadm join 192.168.101.2:6443 --token m42oxn.6a8dk04txtpxd0kt \
--discovery-token-ca-cert-hash sha256:743178d212dcf0bbf40e1a8d8e3f74604035363b3e085835d7901d57d7263836
# On the master node only: install the admin kubeconfig so kubectl works for
# the current user. (Fix: quote all expansions — unquoted $HOME / $(id -u)
# break on paths containing whitespace.)
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
# Give ownership to the current user so kubectl can read it without sudo.
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# Check that the worker nodes have joined the master
kubectl get nodes
# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
# or, from a local copy of the manifest:
kubectl apply -f kube-flannel.yaml
# Watch the pods — all kube-system pods reaching Running means the cluster is healthy
kubectl get pods -n kube-system
(k8s安装完成)
# Label the edge node (the one exposed to external traffic) so the
# ingress-controller gets scheduled onto it.
kubectl label nodes k8s-master edgenode=true
kubectl apply -f ingress-deploy.yaml
# Verify the ingress controller pods are up
kubectl get pod -o wide -n ingress-nginx
# Smoke test: deploy nginx and expose it through a NodePort service
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
创建证书
# Create a self-signed TLS certificate for the Kubernetes dashboard.
mkdir dashboard-certs
cd dashboard-certs/
# Namespace the dashboard resources will live in
kubectl create namespace kubernetes-dashboard
# Private key
openssl genrsa -out dashboard.key 2048
# Certificate request; change CN= to the real IP or domain name if desired
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=kubernetes-dashboard-certs'
# Self-sign the certificate.
# BUG FIX: `openssl x509 -req` defaults to a 30-day validity; the -days value
# given to `openssl req` above does NOT carry over to this step, so the
# original command produced a certificate that expired after one month.
openssl x509 -req -days 36000 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Store key + cert in the secret the dashboard manifest mounts
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
cd ../
# Deploy the dashboard and wait for its pods to come up
kubectl apply -f dashboard.yaml
kubectl get pods -n kubernetes-dashboard
# Create an admin service account and bind it to the cluster-admin role
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
# Print the login token for the dashboard UI
kubectl describe secrets -n kubernetes-dashboard $(kubectl -n kubernetes-dashboard get secret | awk '/dashboard-admin/{print $1}')
(完)
# Download and unpack metrics-server v0.3.6 (provides `kubectl top`)
wget https://github.com/kubernetes-sigs/metrics-server/archive/v0.3.6.tar.gz
tar -zxvf v0.3.6.tar.gz
# Edit the deployment manifest in place (changes are shown below)
cd metrics-server-0.3.6/deploy/1.8+/
vi metrics-server-deployment.yaml
# Original contents of metrics-server-deployment.yaml (before editing).
# NOTE(review): the YAML indentation was lost when this file was pasted into
# the document, which makes it invalid as shown; restored here to standard
# two-space nesting.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
# Edited metrics-server-deployment.yaml: mirror image, pull policy, insecure
# kubelet TLS flags, and resource limits.
# NOTE(review): the YAML indentation was lost when this file was pasted into
# the document; restored here to standard two-space nesting.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        # changed: pull from a Docker Hub mirror instead of k8s.gcr.io,
        # and reuse a locally cached image when present
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        # added: skip kubelet serving-cert verification and prefer node
        # addresses over DNS names, since this cluster's kubelet certs are
        # self-signed
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        # added: resource requests/limits
        resources:
          limits:
            cpu: 300m
            memory: 200Mi
          requests:
            cpu: 200m
            memory: 100Mi
kubectl apply -f metrics-server-0.3.6/deploy/1.8+/
[root@binghe101 ~]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
binghe101 141m 7% 1113Mi 65%
binghe102 62m 3% 549Mi 32%
binghe103 100m 5% 832Mi 48%
kubectl top pod --all-namespaces
error: metrics not available yet
dashboard.yaml
kube-flannel.yaml
calico-3.13.1.yaml
v0.3.6.tar.gz
# List pods in the kube-system namespace
kubectl get pod -n kube-system
# A pod stuck at 0/1 for a long time can be deleted; the controller will
# recreate it
kubectl delete pod <pod name> -n kube-system
# Inspect why a particular pod is failing
kubectl describe pod <pod name> -n kube-system
# CAUTION: reverts kubeadm's changes and wipes this node's local cluster
# state so it can re-join from scratch
kubeadm reset
# Re-print the `kubeadm join` command on the master
kubeadm token create --print-join-command
# On the affected worker node: tear down the stale CNI bridge interface
sudo yum install net-tools
ifconfig cni0 down
ip link delete cni0
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。