1.安装k8s依赖环境
2.安装docker
3.安装k8s
1.安装k8s依赖环境
- #关闭swap
swapoff -a
- #注释掉swap行 /etc/fstab
- #查看分区,swap 行都是 0 表示关闭成功。
free -h
- # 关闭 Selinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
- #重启,检查是否关闭
reboot
getenforce
- #升级内核
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
# 安装后检查 /boot/grub2/grub.cfg 中对应内核 menuentry 中是否包含新内核配置,如果没有,再安装一次!
#grep menuentry /boot/grub2/grub.cfg
- # 设置启动从新内核启动 (/boot/grub2/grub.cfg 在文件中找到完整的核心版本"CentOS linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)")
grub2-set-default "CentOS linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
#grub2-set-default 'CentOS Linux (5.4.197-1.el7.elrepo.x86_64) 7 (Core)'
reboot
uname -r
- #另一种针对4.x内核安装方法
wget http://mirrors.aliyun.com/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-4.4.184-1.el7.elrepo.x86_64.rpm
yum install kernel-lt-4.4.184-1.el7.elrepo.x86_64.rpm
awk -F\' '$1=="menuentry " {print i " : " $2}' /etc/grub2.cfg
#根据上面内核号,设置启动顺序。
grub2-set-default 0
- 加载内核转发模块k8s必要的网络。
yum install -y ipvsadm
yum install -y conntrack
echo 'modprobe br_netfilter' >>/etc/profile
echo 'modprobe -- ip_vs' >>/etc/profile
echo 'modprobe -- ip_vs_rr' >>/etc/profile
echo 'modprobe -- ip_vs_wrr' >>/etc/profile
echo 'modprobe -- ip_vs_sh' >>/etc/profile
echo 'modprobe -- nf_conntrack' >>/etc/profile
#modprobe -- nf_conntrack_ipv4
#modprobe -- nf_conntrack  如果是内核5.2以上,去掉了ipv4的后缀
#验证:
reboot
lsmod |grep -e ip_vs -e nf_conntrack
- #调整内核参数以适配k8s
#调整之前先重启服务器,因为之前的参数没有生效,会影响这里。
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
#内核4.x中这个参数已经被删除
#net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
2.安装docker
- 配置docker的下载yum源。注意这个不是docker镜像仓库!!!只是下载docker软件的地址。
#device-mapper-persistent-data lvm2 为docker依赖的包
yum install -y yum-utils device-mapper-persistent-data lvm2
#配置docker默认yum仓库:任选之一
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#yum-config-manager --add-repo https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo
#禁用和启用yum仓库,如果要删除,直接删除文件。
# yum-config-manager --disable http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# yum-config-manager --enable http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#安装docker-ce
yum install docker-ce -y
- 配置docker的日志信息,国内镜像仓库地址
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
- #启动
systemctl start docker
systemctl enable docker
- #测试
docker run hello-world
#如果打印一行 Hello from Docker! 表示安装完成
- #删除所有容器信息都在这个目录
rm -rf /var/lib/docker
- docker日志:
/var/lib/docker/containers/<容器id>/<容器id>-json.log 里面
3.安装k8s
- #安装kubeadm,kubectl,kubelet即安装k8s平台的官方工具,安装kubectl,kubelet k8s 服务器端和客户端 管理插件,安装yum源,默认源没有安装包。
cat <<EOF > /etc/yum.repos.d/k8s.repo
[k8s]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
yum clean all && yum makecache
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
#设置开机自启动(现在无法启动,还没初始化集群)
systemctl enable kubelet
######################################################################################## ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ 以上操作,所有节点都要操作 ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
- 初始化主节点
#只在主节点操作
#推荐导出配置文件,然后指定配置文件完成初始化,这样可以保留初始化参数。
#默认有许多参数,只需要调整主节点ip,pod网络段,以及kubeproxy支持ipvs模式,版本信息即可。
#初始化时候,会读取hosts去识别主机名字,改了名字记得在这里解析,不然无法启动。
#虚拟机如果cpu只有1核要改2,不然也报错。
kubeadm config print init-defaults > kubeadm-config.yaml
#初始化主节点,过程下载许多镜像,需要等待。
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs |tee kubeadm-init.log
#注意:kubeadm 1.15 起该参数已改名为 --upload-certs,--experimental-upload-certs 属于废弃写法。
#初始化报错时排查:
#1.修改docker配置文件,增加 "exec-opts": ["native.cgroupdriver=systemd"]
#2.或修改kubelet配置:Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cgroup-driver=systemd"
#改完后重启docker和kubelet两个服务。
#重置初始化 #echo y |kubeadm reset
- 下载网络插件
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
#注意:执行kubectl前需先完成 $HOME/.kube/config 访问控制文件的配置。
#主节点增加访问控制文件
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
- 操作节点,加入集群
kubeadm join 192.168.18.101:6443 --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:a3a8b8e1ee899c0df1f19887942d71c6412d9de56955e8aa43191ca49d10b961
#删除节点
#主节点操作:kubectl delete node c102
#被删除的节点上执行:
rm -f /etc/kubernetes/kubelet.conf
rm -f /etc/kubernetes/pki/ca.crt
rm -f /etc/kubernetes/bootstrap-kubelet.conf
reboot
#从节点加入成功后,可能网络、proxy容器无法启动,可以把主节点的pause、proxy镜像导入节点
#如果还有节点的容器没有启动成功,可以重新创建它,方法如下:
kubectl get pod kube-flannel-ds-g85zs -n kube-system -o yaml >kube-flannel-ds-g85zs.yaml
kubectl delete -f kube-flannel-ds-g85zs.yaml
kubectl apply -f kube-flannel-ds-g85zs.yaml
- 修改集群插件通信证书时间,默认是一年,如果更新版本,会自动更新。
#重新更新所有证书,期限1年:
kubeadm alpha certs renew all
#检查所有证书到期时间:
kubeadm alpha certs check-expiration
#如果在没网的环境,需要自己手动更新:先用上面的命令检查证书到期时间,再重新更新所有证书,期限1年。
#安装metrics监控模块
用于查看pod使用的服务器的使用率和Hpa自动扩容的基础组件
安装如下所有yaml文件,使用命令:kubectl apply -f ./
aggregated-metrics-reader.yaml
# aggregated-metrics-reader.yaml
# ClusterRole granting read access to the metrics.k8s.io API.
# The aggregate-to-* labels fold these rules into the built-in
# view/edit/admin roles via RBAC aggregation.
# NOTE(review): original indentation was flattened; restored here.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list", "watch"]
auth-delegator.yaml
---
# Binds the built-in system:auth-delegator ClusterRole to the
# metrics-server ServiceAccount so it can delegate authentication
# decisions (TokenReview) to the kube-apiserver.
# NOTE(review): original indentation was flattened; restored here.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
auth-reader.yaml
# auth-reader.yaml
# Allows metrics-server to read the extension-apiserver-authentication
# ConfigMap in kube-system (needed to validate incoming requests).
# NOTE(review): original indentation was flattened; restored here.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
metrics-apiservice.yaml
# metrics-apiservice.yaml
# Registers the metrics.k8s.io/v1beta1 API with the aggregation layer,
# routing it to the metrics-server Service in kube-system.
# insecureSkipTLSVerify matches the tutorial's self-signed kubelet certs.
# NOTE(review): original indentation was flattened; restored here.
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
metrics-server-deployment.yaml
# metrics-server-deployment.yaml
# ServiceAccount + Deployment running metrics-server v0.3.6.
# --kubelet-insecure-tls and the address-type preference list match the
# tutorial's kubeadm cluster (self-signed kubelet certs, node IPs first).
# NOTE(review): original indentation was flattened; restored here.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --metric-resolution=30s
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
metrics-server-service.yaml
# metrics-server-service.yaml
# ClusterIP Service exposing metrics-server on port 443 inside the
# cluster; the APIService above routes metrics.k8s.io traffic here.
# NOTE(review): original indentation was flattened; restored here.
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
resource-reader.yaml
# resource-reader.yaml
# ClusterRole letting metrics-server read pods/nodes/node stats, plus
# the binding attaching it to the metrics-server ServiceAccount.
# NOTE(review): original indentation was flattened; restored here.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system