30min搭建kubernetes

devopsec 等级 366 1 0

master and node 操作如下

yum源修改

# Back up the stock yum repo config before replacing it.
# (NOTE: the original used "//" which is NOT a shell comment and would fail.)
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak

# Fetch the Aliyun mirror repo config.
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Alternatively (equivalent — run only one of the two):
# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Rebuild the yum cache against the new mirror.
yum clean all   # drop all cached metadata/packages
yum makecache   # regenerate the yum cache

# Upgrade all installed packages (this updates the system, it is not a read-only "check").
yum -y update

# Set the system timezone to Shanghai.
timedatectl set-timezone Asia/Shanghai

升级内核为 4.4.x(kernel-lt 长期支持)版本,下文实际安装的是 4.4.234

获取源
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

安装,装完成后检查 /boot/grub2/grub.cfg中对应内核menuentry中是否包含 initrd16 配置,如果没有,再安装一次!
yum --enablerepo=elrepo-kernel install -y kernel-lt 

查看系统的全部内核
sudo awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
0 : CentOS Linux (4.4.234-1.el7.elrepo.x86_64) 7 (Core)
1 : CentOS Linux (3.10.0-1127.19.1.el7.x86_64) 7 (Core)
2 : CentOS Linux (3.10.0-1127.el7.x86_64) 7 (Core)
3 : CentOS Linux (0-rescue-a3c527d56cc044c1887c29a15fe92891) 7 (Core)


设置开机从新内核启动
grub2-set-default 0
生成grub配置文件
grub2-mkconfig -o /boot/grub2/grub.cfg
重启使配置有效
reboot

查看正在使用的内核
uname -r

使用本地软件包管理软件安装 kubectl 二进制文件

# Configure the Aliyun Kubernetes yum repository.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm and kubectl.
# (The "$ " prompt prefixes in the original would break these lines if
# pasted into a script — removed here.)
yum install -y kubelet kubeadm kubectl

# Enable kubelet at boot and start it now.
systemctl enable kubelet && systemctl start kubelet

设置路由策略
# Check whether the br_netfilter module is loaded (if not: modprobe br_netfilter).
lsmod | grep br_netfilter

# Make bridged traffic visible to iptables (required by kube-proxy/CNI).
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

# Disable swap (kubelet refuses to start with swap on by default):
# comment out swap entries in fstab so the change survives reboot,
# then turn swap off for the running system.
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
swapoff -a

# Verify — both commands should print 1.
sysctl -n net.bridge.bridge-nf-call-iptables
sysctl -n net.bridge.bridge-nf-call-ip6tables

# Enable IPv4 forwarding for the current boot.
echo "1" >/proc/sys/net/ipv4/ip_forward

关闭防火墙

# Stop firewalld now and prevent it from starting at boot.
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state   # verify — should report "not running"

# Disable SELinux: setenforce 0 takes effect immediately (until reboot);
# editing /etc/selinux/config makes it permanent.
# (The original suggested editing the file interactively with vi; the
# loose "SELINUX=disabled" line would only have set a shell variable.)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

安装 docker-ce,国内阿里仓库安装

安装所需的软件包。yum-utils 提供了 yum-config-manager ,并且 device mapper 存储驱动程序需要 device-mapper-persistent-data 和 lvm2。

# Install prerequisites: yum-utils provides yum-config-manager;
# device-mapper-persistent-data and lvm2 are needed by the
# devicemapper storage driver.
yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

# Register the Aliyun docker-ce repository mirror.
yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install the latest Docker Engine (docker-ce) and containerd from the
# repo added above. "-y" added for consistency with the other install
# steps — without it this command prompts and breaks unattended setup.
yum install -y docker-ce docker-ce-cli containerd.io

将普通用户可以执行docker命令

# Create the "docker" group so non-root users can run docker commands.
sudo groupadd docker

# Add the current user to the docker group (log out/in for it to take effect).
sudo usermod -aG docker "${USER}"

# Restart docker.
sudo systemctl restart docker

# Create /etc/docker ("-p" so the command is idempotent if the dir exists).
mkdir -p /etc/docker

# Configure the Docker daemon: systemd cgroup driver (matches kubelet),
# json-file logs capped at 100m, overlay2 storage driver.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

# Drop-in directory for docker systemd unit overrides.
mkdir -p /etc/systemd/system/docker.service.d

# Reload systemd units and restart Docker so daemon.json is applied.
systemctl daemon-reload
systemctl restart docker

# Start docker automatically at boot.
systemctl enable docker

查看kubeadm、kubectl、kubelet 版本命令

[root@master ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.0", GitCommit:"af46c47ce925f4c4ad5cc8d1fca46c7b77d13b38", GitTreeState:"clean", BuildDate:"2020-12-08T17:57:36Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}

[root@master ~]# kubectl version --client
Client Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.0", GitCommit:"af46c47ce925f4c4ad5cc8d1fca46c7b77d13b38", GitTreeState:"clean", BuildDate:"2020-12-08T17:59:43Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}

[root@master ~]# kubelet --version
Kubernetes v1.20.0

master 节点初始化集群

# Initialize the control plane on the master node.
# --kubernetes-version must match the installed kubeadm/kubelet
# (v1.20.0 per the version output above; the original said 1.18.2).
# NOTE(review): --ignore-preflight-errors=all masks real setup problems;
# consider removing it once the preflight checks pass cleanly.
kubeadm init --kubernetes-version=1.20.0 \
--apiserver-advertise-address=192.168.1.12 \
--ignore-preflight-errors=all \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16

POD的网段为: 10.244.0.0/16, api server地址就是master本机IP。

这一步很关键,由于kubeadm 默认从官网 k8s.gcr.io 下载所需镜像,国内无法访问,因此需要通过 --image-repository 指定阿里云镜像仓库地址。

参数解释:

--kubernetes-version: 用于指定k8s版本;
--apiserver-advertise-address:用于指定kube-apiserver监听的ip地址,就是 master本机IP地址。
--pod-network-cidr:用于指定Pod的网络范围; 10.244.0.0/16
--service-cidr:用于指定SVC的网络范围;
--image-repository: 指定阿里云镜像仓库地址 

执行以下命令

 [root@master ~]#  mkdir -p $HOME/.kube
[root@master ~]#  cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config

使kubectl自动补全

source <(kubectl completion bash)
运行kubectl tab时出现以下报错

[root@master bin]# kubectl c-bash: _get_comp_words_by_ref: command not found
loud-controller-man^C

解决方法:
1.安装bash-completion
[root@master bin]# yum install bash-completion -y

2.执行bash_completion
[root@master bin]# source /usr/share/bash-completion/bash_completion

3.重新加载kubectl completion
[root@master bin]# source <(kubectl completion bash)

4.用tab就OK了
[root@master bin]# kubectl get pod --all-namespaces -o wide

查看节点

kubectl get pod --all-namespaces -o wide

添加网络

kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

或者

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

worker 节点添加到集群中

生成加入集群命令
kubeadm token create --print-join-command

kubeadm join 192.168.253.11:6443 --token zz2iu9.ta51l53ajgai8rhx --discovery-token-ca-cert-hash sha256:e49bc0b32bd1f8ebdd8420bf5f29c4d8ab8b0f4abc21d0e9612b57cb8b0c41a8 

部署 dashboard 界面

下载 dashbord.yaml 文件到本地,可以在github上查看想要的版本,例如 2.0.0 地址如下

部署
wget -O dashbord.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
kubectl create -f dashbord.yaml
kubectl proxy

可以通过以下方式查看界面

https://www.helloworld.net/redirect?target=https://github.com/kubernetes/dashboard/blob/master/docs/user/accessing-dashboard/README.md

如果是虚拟机做实验,可以把dashbord端口暴露给node,然后浏览器在访问,如下

设置可以在外部访问dashboard,修改 dashboard以 nodePort 访问,编辑配置文件

kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
修改类型
type: ClusterIP 
改为
type: NodePort

查看暴露端口
[root@master ~]# kubectl -n kubernetes-dashboard get service kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.1.228.144   <none>        443:30657/TCP   51m

此时可以通过30657端口访问
https://192.168.1.12:30657/#/login

查看dashboard.yaml 文件找到 , 表示创建了 kubernetes-dashbord 账户

为该账户创建登录 token

创建简单用户
这里需要创建一个简单的管理用户,通过这个用户来获得到token,来访问dashboard页面。

文件 dashboard-adminuser.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

运行如下命令:

kubectl apply -f dashboard-adminuser.yaml
serviceaccount/admin-user created

文件 dashboard-cluster-role-binding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

运行如下命令:

kubectl apply -f dashboard-cluster-role-binding.yaml
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

获得Token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')

此时将 token 复制到 dashbord 登录页 token 输入框

卸载kubernetes集群

# Remove all nodes from the cluster, then wipe kubeadm state.
kubectl delete node --all
kubeadm reset -f
# Unload the ipip tunnel kernel module (used by calico); verify with lsmod.
modprobe -r ipip
lsmod
# Remove leftover configuration, binaries and data directories.
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
yum clean all
# Quote the glob so yum — not the shell — expands "kube*"; otherwise a
# matching file in the current directory would corrupt the argument list.
yum remove 'kube*'

常见问题:

加入集群时报错: /etc/kubernetes/kubelet.conf already exists

原因: 上次的配置文件没有清理干净,删除即可

rm -rf /etc/kubernetes/kubelet.conf /etc/kubernetes/pki/ca.crt

加入集群时报错: [ERROR Port-10250]: Port 10250 is in use

原因:上次加入没有成功就关闭。重置kubeadm

kubeadm reset

加入集群报错:/proc/sys/net/ipv4/ip_forward contents are not set to 1

echo "1" >/proc/sys/net/ipv4/ip_forward

K8s kubectl error:c-bash: _get_comp_words_by_ref: command not found

source <(kubectl completion bash)
运行kubectl tab时出现以下报错

[root@master bin]# kubectl c-bash: _get_comp_words_by_ref: command not found
loud-controller-man^C

解决方法:
1.安装bash-completion
[root@master bin]# yum install bash-completion -y

2.执行bash_completion
[root@master bin]# source /usr/share/bash-completion/bash_completion

3.重新加载kubectl completion
[root@master bin]# source <(kubectl completion bash)

4.用tab就OK了
[root@master bin]# kubectl get pod --all-namespaces -o wide
收藏
评论区