Deploying k8s 1.21 from binaries, domain-name based
Notes: system parameters
Hostname | IP address | Deployed role | Deployed components
---|---|---|---
m1 | 192.168.11.187 | k8s1: master; k8s2: master | k8s1: etcd, apiserver, controller-manager, scheduler; k8s2: etcd, apiserver, controller-manager, scheduler
n1 | 192.168.11.188 | k8s1: node | k8s1: docker, kubelet, proxy
n2 | 192.168.11.189 | k8s2: node | k8s2: docker, kubelet, proxy
k8s version: v1.21.10, run from binaries
etcd version: 3.4.17, run from binaries
docker version: 19.03.9, run from binaries
calico version: 3.20.1, run as pods
1. Deploy using domain names, and issue certificates for those domain names
2. Deploy two k8s clusters: m1 and n1 form one cluster (k8s1), m1 and n2 form the other (k8s2)
3. On that basis, test whether both clusters using 11.254.0.0/16 for service-cluster-ip causes conflicts
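For point 3, one way to run the comparison once both clusters are up (a sketch; it uses the admin kubeconfig files created later in this guide):
# list Service ClusterIPs in both clusters; overlapping addresses are expected,
# since each cluster allocates from 11.254.0.0/16 independently - the test is
# whether traffic handling on the shared master conflicts
kubectl --kubeconfig /root/k8s1/cfg/admin.kubeconfig get svc -A -o wide
kubectl --kubeconfig /root/k8s2/cfg/admin.kubeconfig get svc -A -o wide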
I. Operating system initialization
1. Adjust system parameters
1.1 Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
1.2 Disable selinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary
1.3 Disable swap
swapoff -a  # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab  # permanent
1.4 Disable NetworkManager
systemctl disable NetworkManager
1.5 Set the hostname per the plan
hostnamectl set-hostname <hostname>
# check the hostname
hostname
1.6 Configure hosts
# m1
cat >> /etc/hosts << EOF
192.168.11.187 m1.etcd
192.168.11.187 m1.apiserver
EOF
# n1
cat >> /etc/hosts << EOF
192.168.11.187 m1.etcd       # calico-node connects to etcd
192.168.11.187 m1.apiserver  # kubelet and kube-proxy connect to the apiserver
EOF
# n2
cat >> /etc/hosts << EOF
192.168.11.187 m1.etcd
192.168.11.187 m1.apiserver
EOF
1.7 Configure time synchronization
yum install -y chrony
# set the time source in /etc/chrony.conf:
vim /etc/chrony.conf
server time1.aliyun.com iburst
Start chronyd and sync the time
systemctl enable chronyd && systemctl restart chronyd && sleep 5s && chronyc sources
1.8 Configure command auto-completion
# install the completion package
yum install -y bash-completion
vim ~/.bashrc
# add the following
export PATH=/usr/local/bin:$PATH
source <(kubectl completion bash)
# reload the environment
source ~/.bashrc
2. Tune kernel parameters
2.1 Load the overlay and br_netfilter modules
modprobe overlay
modprobe br_netfilter
2.2 Set kernel parameters
cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
EOF
Apply the configuration:
sysctl --system
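Optionally verify that the values took effect:
# each should print 1
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward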
2.3 Enable ipvs
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
Load the ipvs modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
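Note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so the modprobe above fails there. A variant that covers both cases:
# fall back to nf_conntrack on kernels where nf_conntrack_ipv4 no longer exists
modprobe -- nf_conntrack_ipv4 2>/dev/null || modprobe -- nf_conntrack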
II. Binaries
# download cfssl, cfssl-certinfo, and cfssljson in advance
etcd
docker
k8s
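A sketch for fetching the cfssl tools (the version number is an assumption; check the cloudflare/cfssl releases page for the current one):
CFSSL_VER=1.6.1
curl -L -o /usr/local/bin/cfssl https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VER}/cfssl_${CFSSL_VER}_linux_amd64
curl -L -o /usr/local/bin/cfssljson https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VER}/cfssljson_${CFSSL_VER}_linux_amd64
curl -L -o /usr/local/bin/cfssl-certinfo https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VER}/cfssl-certinfo_${CFSSL_VER}_linux_amd64
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo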
III. Generate the certificates
Note: create the certificates for both k8s1 and k8s2 on m1 first, then copy the k8s1 certificates to n1 and the k8s2 certificates to n2.
Create the directories
mkdir -p /root/k8s1/{certs,cfg,manifests,etcdCerts}
mkdir -p /root/k8s2/{certs,cfg,manifests,etcdCerts}
etcd CA certificate
# run the steps below once in each of the two directories (k8s1 and k8s2)
cd /root/k8s1
cd /root/k8s2
cat <<EOF > ./etcdCerts/ca-config.json
{
"signing":{
"default":{
"expiry":"87600h"
},
"profiles":{
"etcd":{
"expiry":"87600h",
"usages":[
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat<<EOF > ./etcdCerts/ca-csr.json
{
"CN":"etcd",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -initca ./etcdCerts/ca-csr.json | cfssljson -bare ./etcdCerts/ca
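To sanity-check the new CA (an optional verification, not part of the original flow):
# expect CN=etcd and a ~10-year validity window
openssl x509 -in ./etcdCerts/ca.pem -noout -subject -dates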
K8s CA certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/ca-config.json
{
"signing":{
"default":{
"expiry":"87600h"
},
"profiles":{
"kubernetes":{
"expiry":"87600h",
"usages":[
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat<<EOF > ./certs/ca-csr.json
{
"CN":"kubernetes",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -initca ./certs/ca-csr.json | cfssljson -bare ./certs/ca
etcd certificates (server and peer)
cd /root/k8s1
cd /root/k8s2
cat<<EOF >./etcdCerts/etcd-csr.json
{
"CN":"etcd",
"hosts":[
"127.0.0.1",
"*.etcd"
],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./etcdCerts/ca.pem -ca-key=./etcdCerts/ca-key.pem -config=./etcdCerts/ca-config.json -profile=etcd ./etcdCerts/etcd-csr.json | cfssljson -bare ./etcdCerts/etcd
cp ./etcdCerts/etcd-csr.json ./etcdCerts/peer-etcd-csr.json
cfssl gencert -ca=./etcdCerts/ca.pem -ca-key=./etcdCerts/ca-key.pem -config=./etcdCerts/ca-config.json -profile=etcd ./etcdCerts/peer-etcd-csr.json | cfssljson -bare ./etcdCerts/peer-etcd
APIServer certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/apiserver-csr.json
{
"CN":"kubernetes",
"hosts":[
"127.0.0.1",
"*.apiserver",
"11.254.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/apiserver-csr.json | cfssljson -bare ./certs/apiserver
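Because this deployment leans on the wildcard domain SANs, it is worth confirming they made it into the certificate (optional check):
# expect DNS:*.apiserver and IP Address:11.254.0.1 among the SANs
openssl x509 -in ./certs/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'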
Admin certificate (for kubectl)
#the kubectl certificate lives here; since kubectl acts as the cluster administrator, it is named admin
#prepare the admin certificate config - kubectl only needs a client certificate, so the hosts field in the CSR can be empty
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/admin-csr.json
{
"CN":"admin",
"hosts":[],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"system:masters",
"OU":"System"
}
]
}
EOF
#sign the admin certificate with the root CA (ca.pem)
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/admin-csr.json | cfssljson -bare ./certs/admin
Certificate for the apiserver to access kubelets
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/apiserver-kubelet-client-csr.json
{
"CN":"kube-apiserver-kubelet-client",
"hosts":[],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"system:masters",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/apiserver-kubelet-client-csr.json | cfssljson -bare ./certs/apiserver-kubelet-client
kube-proxy certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/kube-proxy-csr.json
{
"CN":"system:kube-proxy",
"hosts":[],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/kube-proxy-csr.json | cfssljson -bare ./certs/kube-proxy
proxy-client CA certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/proxy-client-ca-csr.json
{
"CN":"front-proxy",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -initca ./certs/proxy-client-ca-csr.json | cfssljson -bare ./certs/proxy-client-ca -
proxy-client certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/proxy-client-csr.json
{
"CN":"front-proxy",
"hosts":[],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/proxy-client-ca.pem -ca-key=./certs/proxy-client-ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/proxy-client-csr.json | cfssljson -bare ./certs/proxy-client
kube-controller-manager certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/kube-controller-manager-csr.json
{
"CN":"system:kube-controller-manager",
"hosts":[
"127.0.0.1",
"*.controller",
"11.254.0.1"
],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/kube-controller-manager-csr.json | cfssljson -bare ./certs/kube-controller-manager
kube-scheduler certificate
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/kube-scheduler-csr.json
{
"CN":"system:kube-scheduler",
"hosts":[
"127.0.0.1",
"*.scheduler ",
"11.254.0.1"
],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -ca=./certs/ca.pem -ca-key=./certs/ca-key.pem -config=./certs/ca-config.json -profile=kubernetes ./certs/kube-scheduler-csr.json | cfssljson -bare ./certs/kube-scheduler
SA certificate and public key
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./certs/sa-csr.json
{
"CN":"sa",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"k8s",
"OU":"System"
}
]
}
EOF
cfssl gencert -initca ./certs/sa-csr.json | cfssljson -bare ./certs/sa -
openssl x509 -in ./certs/sa.pem -pubkey -noout > ./certs/sa.pub
Copy the certificates to the other nodes
#distribute the certificates to the other nodes
scp -r /root/k8s1/certs root@192.168.11.188:/root/k8s1/
scp -r /root/k8s1/etcdCerts root@192.168.11.188:/root/k8s1/
scp -r /root/k8s2/certs root@192.168.11.189:/root/k8s2/
scp -r /root/k8s2/etcdCerts root@192.168.11.189:/root/k8s2/
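scp -r only creates the final path component, so pre-create the parent directories on the target hosts first (a small precaution; assumes root SSH access as used above):
ssh root@192.168.11.188 "mkdir -p /root/k8s1"
ssh root@192.168.11.189 "mkdir -p /root/k8s2"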
IV. Install etcd (k8s1 on node m1)
cat<<EOF > /usr/lib/systemd/system/etcd1.service
[Unit]
Description=Kubernetes etcd

[Service]
EnvironmentFile=/root/k8s1/cfg/etcd
ExecStart=/usr/local/bin/etcd \$ETCD_OPTS
Type=notify
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
# with multiple nodes, set initial-cluster to the full member list; other nodes must change the name and addresses accordingly
cat<<EOF > /root/k8s1/cfg/etcd
ETCD_OPTS="--name=etcd-1 \\
--listen-peer-urls=https://192.168.11.187:2380 \\
--initial-advertise-peer-urls=https://m1.etcd:2380 \\
--listen-client-urls=https://192.168.11.187:2379 \\
--advertise-client-urls=https://192.168.11.187:2379 \\
--initial-cluster=etcd-1=https://m1.etcd:2380 \\
--cert-file=/root/k8s1/etcdCerts/etcd.pem \\
--key-file=/root/k8s1/etcdCerts/etcd-key.pem \\
--peer-cert-file=/root/k8s1/etcdCerts/peer-etcd.pem \\
--peer-key-file=/root/k8s1/etcdCerts/peer-etcd-key.pem \\
--trusted-ca-file=/root/k8s1/etcdCerts/ca.pem \\
--peer-trusted-ca-file=/root/k8s1/etcdCerts/ca.pem \\
--initial-cluster-token=etcd-cluster \\
--initial-cluster-state=new \\
--data-dir=/var/lib/etcd1/data/data \\
--wal-dir=/var/lib/etcd1/wal/wal \\
--max-wals=5 \\
--max-request-bytes=1572864 \\
--snapshot-count=100000 \\
--heartbeat-interval=100 \\
--election-timeout=500 \\
--max-snapshots=5 \\
--quota-backend-bytes=8589934592 \\
--auto-compaction-retention=5m \\
--enable-pprof=false \\
--metrics=extensive \\
--log-level=info"
EOF
systemctl stop etcd1 && systemctl daemon-reload && systemctl enable etcd1 && systemctl start etcd1 && systemctl status etcd1
# common commands
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s1/etcdCerts/ca.pem --cert=/root/k8s1/etcdCerts/etcd.pem --key=/root/k8s1/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:2379" endpoint health --write-out=table
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s1/etcdCerts/ca.pem --cert=/root/k8s1/etcdCerts/etcd.pem --key=/root/k8s1/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:2379" member list -w table
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s1/etcdCerts/ca.pem --cert=/root/k8s1/etcdCerts/etcd.pem --key=/root/k8s1/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:2379" get / --prefix
# curl -k https://m1.etcd:2379/version --cacert /root/k8s1/etcdCerts/ca.pem --key /root/k8s1/etcdCerts/etcd-key.pem --cert /root/k8s1/etcdCerts/etcd.pem
{"etcdserver":"3.4.17","etcdcluster":"3.4.0"}
lsof -i:2379
lsof -i:2380
netstat -nltp | grep 2379
netstat -nltp | grep 2380
# test write and read
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s1/etcdCerts/ca.pem --cert=/root/k8s1/etcdCerts/etcd.pem --key=/root/k8s1/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:2379" put 11111 22222
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s1/etcdCerts/ca.pem --cert=/root/k8s1/etcdCerts/etcd.pem --key=/root/k8s1/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:2379" get 11111
IV. Install etcd (k8s2 on node m1)
cat<<EOF > /usr/lib/systemd/system/etcd2.service
[Unit]
Description=Kubernetes etcd

[Service]
EnvironmentFile=/root/k8s2/cfg/etcd
ExecStart=/usr/local/bin/etcd \$ETCD_OPTS
Type=notify
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
# with multiple nodes, set initial-cluster to the full member list; other nodes must change the name and addresses accordingly
cat<<EOF > /root/k8s2/cfg/etcd
ETCD_OPTS="--name=etcd-2 \\
--listen-peer-urls=https://192.168.11.187:23800 \\
--initial-advertise-peer-urls=https://m1.etcd:23800 \\
--listen-client-urls=https://192.168.11.187:23799 \\
--advertise-client-urls=https://192.168.11.187:23799 \\
--initial-cluster=etcd-2=https://m1.etcd:23800 \\
--cert-file=/root/k8s2/etcdCerts/etcd.pem \\
--key-file=/root/k8s2/etcdCerts/etcd-key.pem \\
--peer-cert-file=/root/k8s2/etcdCerts/peer-etcd.pem \\
--peer-key-file=/root/k8s2/etcdCerts/peer-etcd-key.pem \\
--trusted-ca-file=/root/k8s2/etcdCerts/ca.pem \\
--peer-trusted-ca-file=/root/k8s2/etcdCerts/ca.pem \\
--initial-cluster-token=etcd-cluster \\
--initial-cluster-state=new \\
--data-dir=/var/lib/etcd2/data/data \\
--wal-dir=/var/lib/etcd2/wal/wal \\
--max-wals=5 \\
--max-request-bytes=1572864 \\
--snapshot-count=100000 \\
--heartbeat-interval=100 \\
--election-timeout=500 \\
--max-snapshots=5 \\
--quota-backend-bytes=8589934592 \\
--auto-compaction-retention=5m \\
--enable-pprof=false \\
--metrics=extensive \\
--log-level=info"
EOF
systemctl stop etcd2 && systemctl daemon-reload && systemctl enable etcd2 && systemctl start etcd2 && systemctl status etcd2
# common commands
ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/root/k8s2/etcdCerts/ca.pem --cert=/root/k8s2/etcdCerts/etcd.pem --key=/root/k8s2/etcdCerts/etcd-key.pem --endpoints="https://m1.etcd:23799" endpoint health --write-out=table
V. Install Docker (nodes n1 and n2)
The following is performed on all node machines. A binary install is used here; installing via yum works just as well.
(1) Download the binary package
Download URL: https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
# download from the public internet
wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
tar zxvf docker-19.03.9.tgz
mv docker/* /usr/bin
(2) Manage docker with systemd
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF
(3) Create the configuration file
mkdir /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
registry-mirrors: Aliyun image registry mirror (accelerator)
(4) Start docker and enable it at boot
# start docker
systemctl stop docker
systemctl daemon-reload
systemctl enable docker
systemctl start docker
systemctl status docker
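A quick smoke test (optional; pulling the image assumes internet access through the mirror configured above):
# confirm the daemon is running and the registry mirror is active
docker info | grep -A1 'Registry Mirrors'
docker run --rm hello-world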
VI. Install the master components
Install kube-apiserver (k8s1 on node m1)
# install kube-apiserver
APISERVER="https://m1.apiserver:6443"
# for kubectl
cat<<EOF > /root/k8s1/cfg/admin.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s1/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:admin
  name: system:admin
current-context: system:admin
kind: Config
preferences: {}
users:
- name: system:admin
  user:
    client-certificate: /root/k8s1/certs/admin.pem
    client-key: /root/k8s1/certs/admin-key.pem
EOF
mkdir -p ~/.kube && cp /root/k8s1/cfg/admin.kubeconfig ~/.kube/config
cat<<EOF > /usr/lib/systemd/system/kube-apiserver1.service
[Unit]
Description=Kubernetes kube-apiserver

[Service]
EnvironmentFile=/root/k8s1/cfg/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s1/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--advertise-address=192.168.11.187 \\
--anonymous-auth=false \\
--insecure-port=0 \\
--secure-port=6443 \\
--service-cluster-ip-range=11.254.0.0/16 \\
--kubelet-https=true \\
--tls-cert-file=/root/k8s1/certs/apiserver.pem \\
--tls-private-key-file=/root/k8s1/certs/apiserver-key.pem \\
--client-ca-file=/root/k8s1/certs/ca.pem \\
--kubelet-client-certificate=/root/k8s1/certs/apiserver-kubelet-client.pem \\
--kubelet-client-key=/root/k8s1/certs/apiserver-kubelet-client-key.pem \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--etcd-cafile=/root/k8s1/etcdCerts/ca.pem \\
--etcd-certfile=/root/k8s1/etcdCerts/etcd.pem \\
--etcd-keyfile=/root/k8s1/etcdCerts/etcd-key.pem \\
--etcd-servers=https://m1.etcd:2379 \\
--apiserver-count=1 \\
--logtostderr=true \\
--v=5 \\
--allow-privileged=true \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth \\
--service-node-port-range=443-50000 \\
--requestheader-client-ca-file=/root/k8s1/certs/proxy-client-ca.pem \\
--requestheader-allowed-names=front-proxy \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/root/k8s1/certs/proxy-client.pem \\
--proxy-client-key-file=/root/k8s1/certs/proxy-client-key.pem \\
--service-account-key-file=/root/k8s1/certs/sa.pub \\
--service-account-signing-key-file=/root/k8s1/certs/sa-key.pem \\
--service-account-issuer=api \\
--enable-aggregator-routing=true"
EOF
systemctl stop kube-apiserver1 && systemctl daemon-reload && systemctl enable kube-apiserver1 && systemctl start kube-apiserver1 && systemctl status kube-apiserver1
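Once the unit is active, a couple of sanity checks (optional; not in the original flow):
# expect "ok"
curl --cacert /root/k8s1/certs/ca.pem --cert /root/k8s1/certs/admin.pem --key /root/k8s1/certs/admin-key.pem https://m1.apiserver:6443/healthz
# kubectl reads the admin kubeconfig copied to ~/.kube/config above
kubectl get namespaces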
Install kube-apiserver (k8s2 on node m1)
# install kube-apiserver
APISERVER="https://m1.apiserver:16443"
# for kubectl
cat<<EOF > /root/k8s2/cfg/admin.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s2/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:admin
  name: system:admin
current-context: system:admin
kind: Config
preferences: {}
users:
- name: system:admin
  user:
    client-certificate: /root/k8s2/certs/admin.pem
    client-key: /root/k8s2/certs/admin-key.pem
EOF
cat<<EOF > /usr/lib/systemd/system/kube-apiserver2.service
[Unit]
Description=Kubernetes kube-apiserver

[Service]
EnvironmentFile=/root/k8s2/cfg/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s2/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--advertise-address=192.168.11.187 \\
--anonymous-auth=false \\
--insecure-port=0 \\
--secure-port=16443 \\
--service-cluster-ip-range=11.254.0.0/16 \\
--kubelet-https=true \\
--tls-cert-file=/root/k8s2/certs/apiserver.pem \\
--tls-private-key-file=/root/k8s2/certs/apiserver-key.pem \\
--client-ca-file=/root/k8s2/certs/ca.pem \\
--kubelet-client-certificate=/root/k8s2/certs/apiserver-kubelet-client.pem \\
--kubelet-client-key=/root/k8s2/certs/apiserver-kubelet-client-key.pem \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--etcd-cafile=/root/k8s2/etcdCerts/ca.pem \\
--etcd-certfile=/root/k8s2/etcdCerts/etcd.pem \\
--etcd-keyfile=/root/k8s2/etcdCerts/etcd-key.pem \\
--etcd-servers=https://m1.etcd:23799 \\
--apiserver-count=1 \\
--logtostderr=true \\
--v=5 \\
--allow-privileged=true \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth \\
--service-node-port-range=443-50000 \\
--requestheader-client-ca-file=/root/k8s2/certs/proxy-client-ca.pem \\
--requestheader-allowed-names=front-proxy \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/root/k8s2/certs/proxy-client.pem \\
--proxy-client-key-file=/root/k8s2/certs/proxy-client-key.pem \\
--service-account-key-file=/root/k8s2/certs/sa.pub \\
--service-account-signing-key-file=/root/k8s2/certs/sa-key.pem \\
--service-account-issuer=api \\
--enable-aggregator-routing=true"
EOF
systemctl stop kube-apiserver2 && systemctl daemon-reload && systemctl enable kube-apiserver2 && systemctl start kube-apiserver2 && systemctl status kube-apiserver2
Install kube-controller-manager (k8s1 on node m1)
APISERVER="https://m1.apiserver:6443"
cat<<EOF > /root/k8s1/cfg/kube-controller-manager.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s1/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager
current-context: system:kube-controller-manager
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate: /root/k8s1/certs/kube-controller-manager.pem
    client-key: /root/k8s1/certs/kube-controller-manager-key.pem
EOF
cat<<EOF > /usr/lib/systemd/system/kube-controller-manager1.service
[Unit]
Description=Kubernetes kube-controller-manager

[Service]
EnvironmentFile=/root/k8s1/cfg/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s1/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--master=${APISERVER} \\
--port=10252 \\
--secure-port=10257 \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--kubeconfig=/root/k8s1/cfg/kube-controller-manager.kubeconfig \\
--authentication-kubeconfig=/root/k8s1/cfg/kube-controller-manager.kubeconfig \\
--authorization-kubeconfig=/root/k8s1/cfg/kube-controller-manager.kubeconfig \\
--allocate-node-cidrs \\
--service-cluster-ip-range=11.254.0.0/16 \\
--cluster-cidr=172.248.0.0/16 \\
--cluster-signing-cert-file=/root/k8s1/certs/ca.pem \\
--cluster-signing-key-file=/root/k8s1/certs/ca-key.pem \\
--root-ca-file=/root/k8s1/certs/ca.pem \\
--requestheader-client-ca-file=/root/k8s1/certs/proxy-client-ca.pem \\
--service-account-private-key-file=/root/k8s1/certs/sa-key.pem \\
--logtostderr=true \\
--v=4 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--cluster-name=kubernetes"
EOF
systemctl stop kube-controller-manager1 && systemctl daemon-reload && systemctl enable kube-controller-manager1 && systemctl start kube-controller-manager1 && systemctl status kube-controller-manager1
journalctl -xefu kube-controller-manager1
Install kube-controller-manager (k8s2 on node m1)
APISERVER="https://m1.apiserver:16443"
cat<<EOF > /root/k8s2/cfg/kube-controller-manager.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s2/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager
current-context: system:kube-controller-manager
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate: /root/k8s2/certs/kube-controller-manager.pem
    client-key: /root/k8s2/certs/kube-controller-manager-key.pem
EOF
cat<<EOF > /usr/lib/systemd/system/kube-controller-manager2.service
[Unit]
Description=Kubernetes kube-controller-manager

[Service]
EnvironmentFile=/root/k8s2/cfg/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s2/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--master=${APISERVER} \\
--port=11252 \\
--secure-port=11257 \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--kubeconfig=/root/k8s2/cfg/kube-controller-manager.kubeconfig \\
--authentication-kubeconfig=/root/k8s2/cfg/kube-controller-manager.kubeconfig \\
--authorization-kubeconfig=/root/k8s2/cfg/kube-controller-manager.kubeconfig \\
--allocate-node-cidrs \\
--service-cluster-ip-range=11.254.0.0/16 \\
--cluster-cidr=172.248.0.0/16 \\
--cluster-signing-cert-file=/root/k8s2/certs/ca.pem \\
--cluster-signing-key-file=/root/k8s2/certs/ca-key.pem \\
--root-ca-file=/root/k8s2/certs/ca.pem \\
--requestheader-client-ca-file=/root/k8s2/certs/proxy-client-ca.pem \\
--service-account-private-key-file=/root/k8s2/certs/sa-key.pem \\
--logtostderr=true \\
--v=4 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--cluster-name=kubernetes"
EOF
systemctl stop kube-controller-manager2 && systemctl daemon-reload && systemctl enable kube-controller-manager2 && systemctl start kube-controller-manager2 && systemctl status kube-controller-manager2
journalctl -xefu kube-controller-manager2
Install kube-scheduler (k8s1 on node m1)
APISERVER="https://m1.apiserver:6443"
cat<<EOF > /root/k8s1/cfg/kube-scheduler.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s1/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler
current-context: system:kube-scheduler
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate: /root/k8s1/certs/kube-scheduler.pem
    client-key: /root/k8s1/certs/kube-scheduler-key.pem
EOF
cat<<EOF > /usr/lib/systemd/system/kube-scheduler1.service
[Unit]
Description=Kubernetes kube-scheduler

[Service]
EnvironmentFile=/root/k8s1/cfg/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s1/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--master=${APISERVER} \\
--port=10251 \\
--secure-port=10259 \\
--kubeconfig=/root/k8s1/cfg/kube-scheduler.kubeconfig \\
--authentication-kubeconfig=/root/k8s1/cfg/kube-scheduler.kubeconfig \\
--authorization-kubeconfig=/root/k8s1/cfg/kube-scheduler.kubeconfig \\
--logtostderr=true \\
--v=4 \\
--leader-elect"
EOF
systemctl stop kube-scheduler1 && systemctl daemon-reload && systemctl enable kube-scheduler1 && systemctl start kube-scheduler1 && systemctl status kube-scheduler1
Install kube-scheduler (k8s2 on node m1)
APISERVER="https://m1.apiserver:16443"
cat<<EOF > /root/k8s2/cfg/kube-scheduler.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/k8s2/certs/ca.pem
    server: ${APISERVER}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler
current-context: system:kube-scheduler
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate: /root/k8s2/certs/kube-scheduler.pem
    client-key: /root/k8s2/certs/kube-scheduler-key.pem
EOF
cat<<EOF > /usr/lib/systemd/system/kube-scheduler2.service
[Unit]
Description=Kubernetes kube-scheduler

[Service]
EnvironmentFile=/root/k8s2/cfg/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s2/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--master=${APISERVER} \\
--port=11251 \\
--secure-port=11259 \\
--kubeconfig=/root/k8s2/cfg/kube-scheduler.kubeconfig \\
--authentication-kubeconfig=/root/k8s2/cfg/kube-scheduler.kubeconfig \\
--authorization-kubeconfig=/root/k8s2/cfg/kube-scheduler.kubeconfig \\
--logtostderr=true \\
--v=4 \\
--leader-elect"
EOF
systemctl stop kube-scheduler2 && systemctl daemon-reload && systemctl enable kube-scheduler2 && systemctl start kube-scheduler2 && systemctl status kube-scheduler2
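With both controller-managers and schedulers running, their /healthz endpoints on the secure ports give a quick summary (assumption: /healthz is on the components' default always-allowed path list, so -k with no client certificate is enough):
curl -k https://127.0.0.1:10257/healthz; echo   # k8s1 controller-manager
curl -k https://127.0.0.1:10259/healthz; echo   # k8s1 scheduler
curl -k https://127.0.0.1:11257/healthz; echo   # k8s2 controller-manager
curl -k https://127.0.0.1:11259/healthz; echo   # k8s2 scheduler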
Preparation for installing the nodes (k8s1 and k8s2 on m1)
Create the bootstrap token
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./cfg/secret
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-8edc64
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token"
  token-id: 8edc64
  token-secret: cfc6c83dc955d6s3
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
EOF
Configure RBAC
# Reference: https://kubernetes.io/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/
### The bootstrapping node is now authenticated as a member of the system:bootstrappers group, and it needs to be authorized to create certificate signing requests (CSRs) and retrieve them once signed. Fortunately, Kubernetes ships a ClusterRole that encapsulates exactly these permissions: system:node-bootstrapper. All that is needed is a ClusterRoleBinding that binds the system:bootstrappers group to the system:node-bootstrapper ClusterRole.
# allow bootstrapping nodes to create CSRs
cd /root/k8s1
cd /root/k8s2
cat<<EOF > ./cfg/rb.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: create-csrs-for-bootstrapping
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:node-bootstrapper
  apiGroup: rbac.authorization.k8s.io
EOF
# auto-approve all CSRs from the "system:bootstrappers" group
cat<<EOF > ./cfg/approve.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
# auto-approve CSR renewal requests from the "system:nodes" group
cat <<EOF > ./cfg/approve-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f /root/k8s1/cfg/secret
kubectl apply -f /root/k8s1/cfg/rb.yaml
kubectl apply -f /root/k8s1/cfg/approve.yaml
kubectl apply -f /root/k8s1/cfg/approve-node.yaml
kubectl --kubeconfig /root/k8s2/cfg/admin.kubeconfig apply -f /root/k8s2/cfg/secret
kubectl --kubeconfig /root/k8s2/cfg/admin.kubeconfig apply -f /root/k8s2/cfg/rb.yaml
kubectl --kubeconfig /root/k8s2/cfg/admin.kubeconfig apply -f /root/k8s2/cfg/approve.yaml
kubectl --kubeconfig /root/k8s2/cfg/admin.kubeconfig apply -f /root/k8s2/cfg/approve-node.yaml
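The kubelet options in section VIII reference /root/k8s1/cfg/bootstrap.kubeconfig, which is not created anywhere above. A sketch of generating it from the bootstrap token defined in ./cfg/secret, using the standard kubectl config workflow (for k8s2, swap in its paths and port 16443):
# token = <token-id>.<token-secret> from ./cfg/secret
TOKEN=8edc64.cfc6c83dc955d6s3
kubectl config set-cluster kubernetes --certificate-authority=/root/k8s1/certs/ca.pem --embed-certs=true --server=https://m1.apiserver:6443 --kubeconfig=/root/k8s1/cfg/bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${TOKEN} --kubeconfig=/root/k8s1/cfg/bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/root/k8s1/cfg/bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/root/k8s1/cfg/bootstrap.kubeconfig
# ship it to n1 alongside the certificates
scp /root/k8s1/cfg/bootstrap.kubeconfig root@192.168.11.188:/root/k8s1/cfg/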
VII. Current cluster state
# all three hosts show the same output
# ipvsadm has no entries
[root@m1 certs]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@n1 certs]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
# 11.254.0.1:443 is not reachable either
[root@m1 certs]# curl https://11.254.0.1:443 --cacert ca.pem --cert apiserver.pem --key apiserver-key.pem
curl: (7) Failed connect to 11.254.0.1:443; Connection refused
[root@n1 certs]# curl https://11.254.0.1:443 --cacert ca.pem --cert apiserver.pem --key apiserver-key.pem
curl: (7) Failed connect to 11.254.0.1:443; Connection refused
VIII. Install the node components
Install kubelet (k8s1 on node n1)
cat<<EOF > /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/root/k8s1/cfg/kubelet
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
StartLimitInterval=0
RestartSec=10
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat<<EOF > /root/k8s1/cfg/kubelet
KUBELET_OPTS="--hostname-override=n1 \\
--bootstrap-kubeconfig=/root/k8s1/cfg/bootstrap.kubeconfig \\
--kubeconfig=/root/k8s1/cfg/kubelet.kubeconfig \\
--cert-dir=/root/k8s1/certs \\
--network-plugin=cni \\
--config=/root/k8s1/cfg/kubelet.config \\
--fail-swap-on=false \\
--container-runtime=docker \\
--runtime-request-timeout=15m \\
--rotate-certificates"
EOF
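The remaining kubelet flags and startup commands are truncated in the source. Following the pattern used for every other service in this guide, the next step would presumably be (a sketch; note that /root/k8s1/cfg/kubelet.config is referenced above but its contents are not shown in the surviving text):
systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet && systemctl status kubelet
# back on m1, the node's CSR should be auto-approved and the node should appear:
kubectl get nodes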