安装 k8s
1. 准备机器
准备三台机器
192.168.136.104 master节点
192.168.136.105 worker节点
192.168.136.106 worker节点

2. 安装前配置
1.基础环境
#########################################################################
# 1. Base environment (run on EVERY node)
# Disable the firewall. On a cloud server, open the required ports in the
# security-group rules instead:
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports
systemctl stop firewalld
systemctl disable firewalld

# Give each node a unique hostname
hostnamectl set-hostname k8s-01
# Verify the change
hostnamectl status

# Make the local hostname resolvable
echo "127.0.0.1 $(hostname)" >> /etc/hosts

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

# Disable swap — kubelet refuses to run with swap enabled
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Allow iptables to inspect bridged traffic
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#%E5%85%81%E8%AE%B8-iptables-%E6%A3%80%E6%9F%A5%E6%A1%A5%E6%8E%A5%E6%B5%81%E9%87%8F
## Load br_netfilter if it is not loaded yet:
## sudo modprobe br_netfilter
## Confirm with:
## lsmod | grep br_netfilter

# Pass bridged IPv4 traffic to the iptables chains.
# First rewrite any keys that already exist in /etc/sysctl.conf ...
sed -i 's#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g' /etc/sysctl.conf
sed -i 's#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g' /etc/sysctl.conf
sed -i 's#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g' /etc/sysctl.conf
sed -i 's#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g' /etc/sysctl.conf
sed -i 's#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g' /etc/sysctl.conf
sed -i 's#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g' /etc/sysctl.conf
sed -i 's#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g' /etc/sysctl.conf
# ... then append them in case they were missing entirely
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.conf
# Apply the settings
sysctl -p
#########################################################################
# 2. Docker environment
# 2. Docker environment (run on EVERY node)
# Remove any pre-existing Docker packages
sudo yum remove docker*
sudo yum install -y yum-utils
# Add the Aliyun Docker CE yum repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# List the available Docker versions
yum list docker-ce --showduplicates | sort -r
# Install the pinned version 3:26.1.4-1.el7
yum install -y docker-ce-3:26.1.4-1.el7.x86_64 docker-ce-cli-3:26.1.4-1.el7.x86_64 containerd.io
# Start the daemon and enable it at boot
systemctl start docker
systemctl enable docker
# Configure registry mirrors — substitute your own accelerator addresses
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<'EOF'
{
  "registry-mirrors": [
    "https://#############.mirror.swr.myhuaweicloud.com",
    "https://#############.mirror.aliyuncs.com"
  ]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
# 3. Install the Kubernetes core packages (run on EVERY node)
# Configure the Kubernetes yum repository (Aliyun mirror)
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Remove old versions, if any
yum remove -y kubelet kubeadm kubectl
# List the installable versions
yum list kubelet --showduplicates | sort -r
# Install pinned versions of kubelet, kubeadm and kubectl
yum install -y kubelet-1.21.0 kubeadm-1.21.0 kubectl-1.21.0
# Enable kubelet at boot and start it. kubelet will crash-loop
# ("activating (auto-restart)") until the other components are installed
# and the cluster is initialized — this is expected.
systemctl enable kubelet
systemctl start kubelet
[root@docker104 ~]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: activating (auto-restart) (Result: exit-code) since Tue 2024-06-25 20:40:33 PDT; 9s ago
     Docs: https://kubernetes.io/docs/
  Process: 24068 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
 Main PID: 24068 (code=exited, status=1/FAILURE)

Jun 25 20:40:33 docker104 systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Jun 25 20:40:33 docker104 systemd[1]: Unit kubelet.service entered failed state.
Jun 25 20:40:33 docker104 systemd[1]: kubelet.service failed.
4. 初始化k8s master节点(master节点执行)
# 首先查看依赖的镜像
[rootdocker104 ~] kubeadm config images list
I0625 20:47:43.820940 24427 version.go:254] remote version is much newer: v1.30.2; falling back to: stable-1.21
k8s.gcr.io/kube-apiserver:v1.21.14
k8s.gcr.io/kube-controller-manager:v1.21.14
k8s.gcr.io/kube-scheduler:v1.21.14
k8s.gcr.io/kube-proxy:v1.21.14
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0

#封装成images.sh脚本文件
#!/bin/bash
# images.sh — pre-pull the control-plane images from the Aliyun mirror,
# since k8s.gcr.io is unreachable from many networks.
images=(
  kube-apiserver:v1.21.0
  kube-proxy:v1.21.0
  kube-controller-manager:v1.21.0
  kube-scheduler:v1.21.0
  coredns:v1.8.0
  etcd:3.4.13-0
  pause:3.4.1
)
for imageName in "${images[@]}"; do
  docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
done
# --- end of images.sh ---

## NOTE: for k8s 1.21.0 the coredns image is special — the Aliyun mirror
## serves it under an extra path level (coredns/coredns), so re-tag it to
## the name kubeadm expects:
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.0 \
  registry.cn-hangzhou.aliyuncs.com/google_containers/coredns/coredns:v1.8.0
# Make the script executable and run it
chmod +x images.sh
./images.sh
######## kubeadm init — one master ########################
######## kubeadm join — the other workers ########################
# Initialize the control plane (run on the MASTER node only)
kubeadm init \
  --apiserver-advertise-address=192.168.136.104 \
  --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
  --kubernetes-version=v1.21.0 \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=192.178.0.0/16
## Note on pod-cidr and service-cidr:
# CIDR — Classless Inter-Domain Routing — specifies a reachable network range.
# --apiserver-advertise-address  address the apiserver advertises (the master's IP)
# --image-repository             image registry prefix
# --pod-network-cidr             pod subnet range
# --service-cidr                 service (load-balancer) subnet range
# The pod range, the service range and the host network range must NOT overlap.
############ Commands the init output tells you to run ##############
Your Kubernetes control-plane has initialized successfully!
# 复制相关文件夹
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  # 声明环境变量
  export KUBECONFIG=/etc/kubernetes/admin.conf

# 部署一个pod网络(安装指定的网络插件)
You should now deploy a pod network to the cluster.
Run kubectl apply -f [podnetwork].yaml with one of the options listed at:https://kubernetes.io/docs/concepts/cluster-administration/addons/##############如下推荐安装calico#####################
# Install the Calico network plugin (recommended)
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
### If the Calico images fail to download, fetch the manifest, check which
### versions it needs, and load the images by hand:
wget https://docs.projectcalico.org/manifests/calico.yaml
# Check the image versions the manifest references
# (here: calico/cni, calico/node, calico/kube-controllers at v3.25.0)
grep 'image:' calico.yaml
# Drop the docker.io/ prefix so locally loaded images are matched
sed -i 's#docker.io/##g' calico.yaml
# Verify the substitution
grep 'image:' calico.yaml
# Download the matching release bundle from the official releases page
# (https://github.com/projectcalico/calico/releases) and unpack it
tar -vzxf release-v3.25.0.tgz
# The bundle contains bin/, images/ and manifests/. From the images/
# directory, load the images that are needed:
docker load -i calico-kube-controllers.tar
docker load -i calico-cni.tar
docker load -i calico-node.tar
# Re-apply the manifest
kubectl apply -f calico.yaml
### Check the result
kubectl get pod -A ##获取集群中所有部署好的应用Pod
[rootdocker104 images]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-77959b97b9-pgl2g 1/1 Running 0 25m
kube-system calico-node-h6bsk 1/1 Running 0 25m
kube-system coredns-57d4cbf879-s726m 1/1 Running 0 17h
kube-system coredns-57d4cbf879-trdbs 1/1 Running 0 17h
kube-system etcd-docker104 1/1 Running 0 17h
kube-system kube-apiserver-docker104 1/1 Running 0 17h
kube-system kube-controller-manager-docker104 1/1 Running 0 17h
kube-system kube-proxy-q9g8p 1/1 Running 0 17h
kube-system kube-scheduler-docker104 1/1 Running 0 17h

kubectl get nodes ##查看集群所有机器的状态
5. 初始化woker节点
Then you can join any number of worker nodes by running the following on each as root:
# Run on each WORKER node to join the cluster. The exact command (token and
# CA cert hash) is printed at the end of `kubeadm init` on the master.
kubeadm join 192.168.136.104:6443 --token tbjrly.cbmgi5g7nb366f1m \
  --discovery-token-ca-cert-hash sha256:a5f53bc7d06d595ae0e34fd92028e03bdbec8aa62b1041c9765f2739f59877fe
# After every worker has joined, run on the MASTER node:
kubectl get nodes   # show the status of every node in the cluster
[rootdocker104 k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
docker104 Ready control-plane,master 17h v1.21.0
docker105 NotReady none 92s v1.21.0
docker106 NotReady none 74s v1.21.0

# worker节点是not ready状态,可能是这个节点没装网络插件,和master节点同样导入一下网络插件calico的镜像即可自动安装
# 具体可以查看日志 tail -100f /var/log/messages
## 日志起始
Jun 26 18:21:08 docker105 kubelet: E0626 18:21:08.870465 91095 pod_workers.go:190] Error syncing pod, skipping errfailed to \StartContainer\ for \upgrade-ipam\ with ImagePullBackOff: \Back-off pulling image \\\calico/cni:v3.25.0\\\\ podkube-system/calico-node-2zq8d podUID78164c8e-8ba0-477e-90d1-76be4fd6965f
Jun 26 18:21:13 docker105 kubelet: I0626 18:21:13.264027 91095 cni.go:239] Unable to update cni config errno networks found in /etc/cni/net.d
Jun 26 18:21:13 docker105 kubelet: E0626 18:21:13.281034 91095 kubelet.go:2218] Container runtime network not ready networkReadyNetworkReadyfalse reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
## 日志结束
# As shown above, "cni config uninitialized" means the network plugin is
# missing on this worker — load the Calico images here as well:
docker load -i calico-cni.tar
docker load -i calico-node.tar
# A moment later the master will report every node as Ready
[rootdocker104 k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
docker104 Ready control-plane,master 18h v1.21.0
docker105 Ready none 75m v1.21.0
docker106 Ready none 75m v1.21.0

[root@docker104 k8s]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-77959b97b9-pgl2g 1/1 Running 0 113m
kube-system calico-node-2zq8d 1/1 Running 0 80m
kube-system calico-node-b94mp 1/1 Running 0 80m
kube-system calico-node-h6bsk 1/1 Running 0 113m
kube-system coredns-57d4cbf879-s726m 1/1 Running 0 18h
kube-system coredns-57d4cbf879-trdbs 1/1 Running 0 18h
kube-system etcd-docker104 1/1 Running 0 18h
kube-system kube-apiserver-docker104 1/1 Running 0 18h
kube-system kube-controller-manager-docker104 1/1 Running 0 18h
kube-system kube-proxy-2djt9 1/1 Running 0 80m
kube-system kube-proxy-j5jkv 1/1 Running 0 80m
kube-system kube-proxy-q9g8p 1/1 Running 0 18h
kube-system kube-scheduler-docker104 1/1 Running 0 18h

7、设置ipvs模式
k8s整个集群为了访问通,默认是用iptables,性能下降(kube-proxy在集群之间同步iptables的内容)
# 1. Check which proxy mode kube-proxy is currently using
kubectl logs -n kube-system kube-proxy-28xv4
# 2. Edit the kube-proxy ConfigMap and change "mode" to ipvs.
#    The default is iptables, which becomes slow once the cluster grows.
kubectl edit cm kube-proxy -n kube-system
修改如下(mode 改为 ipvs):
    ipvs:
      excludeCIDRs: null
      minSyncPeriod: 0s
      scheduler: ""
      strictARP: false
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
### 修改了kube-proxy的配置,为了让重新生效,需要杀掉以前的kube-proxy
kubectl get pod -A | grep kube-proxy
kubectl delete pod kube-proxy-pqgnt -n kube-system
### 修改完成后可以重启kube-proxy以生效

8、部署应用
# 测试部署一个Nginx应用
[rootdocker104 k8s]# kubectl create deploy my-nginx --imagenginx
deployment.apps/my-nginx created# 查看部署应用的ip
[rootdocker104 k8s]# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default my-nginx-6b74b79f57-mg76v 1/1 Running 0 66s 192.178.70.193 docker105 none none# 访问测试nginx
[rootdocker104 k8s]# curl 192.178.70.193
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>