Kubernetes安装
2022-02-14 约 2325 字
预计阅读 5 分钟
minikube 创建集群
安装kubelet
添加rpm源
1
2
3
4
5
6
7
8
9
# Aliyun mirror of the Kubernetes yum repository (CentOS 7, x86_64).
# repo_gpgcheck is off because the mirror's repodata signature check fails.
tee /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
安装kubectl
1
# Install kubectl, skipping package signature verification.
yum install --nogpgcheck -y kubectl
添加自动补全
1
2
3
# Enable kubectl shell completion: per-user via .bashrc and
# system-wide via bash_completion.d.
yum install -y bash-completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
kubectl completion bash > /etc/bash_completion.d/kubectl
安装minikube
1
# Download the latest minikube binary and install it to /usr/local/bin.
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
  && install minikube-linux-amd64 /usr/local/bin/minikube
1
2
3
4
# Prerequisites for minikube's "none" (bare-host) driver.
yum install -y conntrack socat kubernetes-cni
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
# Fixed from original: every long option had a stray space after `=`
# (e.g. `--driver= none`), which split the value into a separate argument.
minikube start --driver=none --network-plugin=cni \
  --extra-config=kubeadm.ignore-preflight-errors=NumCPU --force --cpus 1
# Alternative invocation using CN image mirrors.
minikube start --driver=none --network-plugin=cni \
  --image-mirror-country=cn --registry-mirror=https://f1z25q5p.mirror.aliyuncs.com
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# Move minikube's root-created config dirs into the invoking user's home
# and take ownership.  Fixed from original: `$HOME /.kube` contained a
# stray space, so chown targeted $HOME itself plus literal /.kube.
mv /root/.kube /root/.minikube "$HOME"
chown -R "$USER" "$HOME/.kube" "$HOME/.minikube"
# Convenience aliases scoped to kube-system (kuebctl typos fixed).
cat >> ~/.bashrc <<'EOF'
alias kcp='kubectl get po -o wide -n kube-system'
alias kcdp='kubectl delete po -n kube-system'
alias kcl='kubectl logs -f -n kube-system'
alias kcs='kubectl get svc -n kube-system'
alias kcn='kubectl get nodes -o wide -n kube-system'
alias kce='kubectl get endpoints -n kube-system'
alias kci='kubectl get ing -n kube-system'
alias kcir='kubectl get ingressroute -n kube-system'
alias kca='kubectl apply -n kube-system'
alias kct='kubectl create -n kube-system'
alias kcd='kubectl describe po -n kube-system'
alias kexec='kubectl exec -ti -n kube-system'
alias kall='kubectl get svc,pods,nodes --all-namespaces -o wide -n kube-system'
alias kdel='kubectl delete -n kube-system'
EOF
source ~/.bashrc
1
2
# Restart minikube with the CNI plugin enabled, then install Calico.
# Fixed: original `--network-plugin= cni` had a space after `=`.
minikube start --network-plugin=cni
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
kubeadm 创建集群
服务器初始化
创建密码
1
# Generate a random 24-character alphanumeric secret and store it.
# Fixed: redundant `cat /dev/urandom |`; ensure ~/.init exists first.
mkdir -p ~/.init
tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 24 | tee ~/.init/sshkey
初始化系统配置
1
2
3
# Switch SELinux to permissive immediately and disable it permanently,
# then print the resulting mode.
setenforce 0 &&
  sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config &&
  getenforce
1
2
3
4
5
# Stop firewalld now and keep it off across reboots, then show status.
# Fixed: the original ran `systemctl daemon-reload` twice between the
# steps — daemon-reload re-reads unit files and is unrelated to
# stopping/disabling a service, so both calls were removed.
systemctl stop firewalld \
  && systemctl disable firewalld \
  && systemctl status firewalld
1
2
3
4
# Install the iptables service unit but keep it stopped and disabled
# (kube-proxy/CNI manage the rules themselves).
yum install -y iptables-services &&
  systemctl stop iptables &&
  systemctl disable iptables &&
  systemctl status iptables
1
2
3
4
5
6
# Swap the default CentOS 7 repos for the Aliyun mirrors (backup kept
# in /etc/yum.repos.d.bak), then rebuild the yum cache.
yum install -y wget
cp -r /etc/yum.repos.d /etc/yum.repos.d.bak
rm -f /etc/yum.repos.d/*.repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all && yum makecache
1
2
3
4
5
6
7
# Raise per-user process and open-file limits for all users.
cat >> /etc/security/limits.conf <<'EOF'
# End of file
* soft nproc 10240000
* hard nproc 10240000
* soft nofile 10240000
* hard nofile 10240000
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
# One-time backup of /etc/sysctl.conf, then install a kernel-tuning
# template.  Every setting ships commented out — uncomment what the host
# needs and apply with `sysctl -p`.
# Fixes vs. original: `mv /etc/sysctl.conf{ ,_bk}` had a space inside the
# brace expansion (making it two literal words), and the heredoc
# terminator was commented out (`# EOF`), so the shell consumed the rest
# of the script looking for EOF.
[ ! -e "/etc/sysctl.conf_bk" ] && /bin/mv /etc/sysctl.conf /etc/sysctl.conf_bk \
  && cat > /etc/sysctl.conf <<'EOF'
# fs.file-max=1000000
# fs.nr_open=20480000
# net.ipv4.tcp_max_tw_buckets = 180000
# net.ipv4.tcp_sack = 1
# net.ipv4.tcp_window_scaling = 1
# net.ipv4.tcp_rmem = 4096 87380 4194304
# net.ipv4.tcp_wmem = 4096 16384 4194304
# net.ipv4.tcp_max_syn_backlog = 16384
# net.core.netdev_max_backlog = 32768
# net.core.somaxconn = 32768
# net.core.wmem_default = 8388608
# net.core.rmem_default = 8388608
# net.core.rmem_max = 16777216
# net.core.wmem_max = 16777216
# net.ipv4.tcp_timestamps = 0
# net.ipv4.tcp_fin_timeout = 20
# net.ipv4.tcp_synack_retries = 2
# net.ipv4.tcp_syn_retries = 2
# net.ipv4.tcp_syncookies = 1
# #net.ipv4.tcp_tw_len = 1
# net.ipv4.tcp_tw_reuse = 1
# net.ipv4.tcp_mem = 94500000 915000000 927000000
# net.ipv4.tcp_max_orphans = 3276800
# net.ipv4.ip_local_port_range = 1024 65000
# #net.nf_conntrack_max = 6553500
# #net.netfilter.nf_conntrack_max = 6553500
# #net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
# #net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
# #net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
# #net.netfilter.nf_conntrack_tcp_timeout_established = 3600
EOF
1
2
# Disable swap (kubelet refuses to start with swap active) and remove
# the swap entry from fstab so the change survives reboot.
# Fixed: original piped swapoff into cp (`swapoff -a| cp ...`) — the `|`
# was clearly a typo for a command separator.
cp /etc/fstab /etc/fstab_bak
swapoff -a
grep -v swap /etc/fstab_bak > /etc/fstab
1
2
3
4
5
6
7
8
9
10
11
12
# Time sync via chrony against the Asia NTP pool, timezone Shanghai.
# Fixed: `cp -rf /etc/chrony.conf{ ,.bak}` had a space inside the brace
# expansion, so the backup copy was never made.
yum install -y chrony
cp -f /etc/chrony.conf /etc/chrony.conf.bak
sed -i 's/^server/#&/' /etc/chrony.conf
cat >> /etc/chrony.conf <<'EOF'
server 0.asia.pool.ntp.org iburst
server 1.asia.pool.ntp.org iburst
server 2.asia.pool.ntp.org iburst
server 3.asia.pool.ntp.org iburst
EOF
timedatectl set-timezone Asia/Shanghai
systemctl enable chronyd && systemctl restart chronyd
timedatectl && chronyc sources
1
2
3
4
5
6
7
8
9
10
11
# Load the IPVS kernel modules on every boot and verify they are present.
# Fixed: a stray `}` followed this snippet in the original.
# NOTE(review): on kernels >= 4.19 the conntrack module is named
# `nf_conntrack`, not `nf_conntrack_ipv4` — confirm for the target host.
cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install ipset ipvsadm -y
sysctl --system
安装docker
1
# Install Docker via a remote convenience script.
# NOTE(review): piping a remote script to sh executes unreviewed code.
curl -L https://gitee.com/YunFeiGuoJi/Cnblog/raw/master/shell/Scripts/docker_install.sh | sh
配置环境
配置ipv4转发
1
2
3
4
5
6
# Let iptables see bridged traffic and enable IPv4 forwarding, as
# required by kubeadm's preflight checks.
sudo tee /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
添加kubernetes yum源
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# Kubernetes yum repo.  The upstream Google repo is kept commented for
# reference; the active block uses the Aliyun mirror.
# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
# [kubernetes]
# name=Kubernetes
# baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
# enabled=1
# gpgcheck=1
# repo_gpgcheck=1
# gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
# exclude=kubelet kubeadm kubectl
# EOF
cat <<'EOF' | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# NOTE(review): repo_gpgcheck switched 1 -> 0 for consistency with the
# earlier repo block and the --nogpgcheck installs in this document —
# repodata signature checks are known to fail against the Aliyun mirror.
安装kubernetes
1
2
3
4
# Install matching 1.19.8 versions of kubelet/kubeadm/kubectl.
# Fixed: `--disableexcludes= kubernetes` had a space after `=`.
sudo yum install -y kubelet-1.19.8 kubeadm-1.19.8 kubectl-1.19.8 \
  --disableexcludes=kubernetes --nogpgcheck
# `enable --now` starts the unit as well; the original's extra
# `systemctl start kubelet` was redundant and has been dropped.
sudo systemctl enable --now kubelet
集群初始化
1
2
3
4
5
6
7
8
9
# Bootstrap the control plane (CN image mirror, flannel-compatible CIDR),
# then install the admin kubeconfig for the current user.
# Fixed: `$HOME /.kube` and `$( id -u) :$( id -g)` contained stray spaces.
kubeadm init \
  --apiserver-advertise-address 172.28.81.11 \
  --image-repository registry.aliyuncs.com/google_containers \
  --pod-network-cidr 10.244.0.0/16 \
  --node-name k8s01
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
初始化node结点
1
2
# Join a worker node to the control plane at 172.28.81.7:6443.
# NOTE(review): bootstrap tokens expire after 24h — if this one is stale,
# regenerate with `kubeadm token create --print-join-command`.
kubeadm join 172.28.81.7:6443 --token 86n32f.kzmt9o2yxwehturv \
--discovery-token-ca-cert-hash sha256:92f3be0c4daf60820d96855ff9787bdb0ed9cb5cbb7bd012d1ad123e6a2c4ecf
安装网络插件
配置.bashrc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# 配置.bashrc
cp /etc/kubernetes/admin.conf /root/.kube/admin.conf
yum install bash-completion -y
cat >> ~/.bashrc <<EOF
source <(kubectl completion bash)
source /usr/share/bash-completion/bash_completion
source <(helm completion bash)
#export KUBECONFIG=/etc/kubernetes/admin.conf
# 配置别名
alias kocp='kubectl get po -o wide --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias kocdp='kubectl delete po --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias kocl='kubectl logs -f --tail 200 --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias kocs='kubectl get svc --kubeconfig=/root/.kube/admin.conf kube-system'
alias kocn='kubectl get nodes -o wide --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koce='kuebctl get endpoints --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koci='kuebctl get ing --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias kocir='kubectl get ingressroute --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koca='kubectl apply --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koct='kubectl create --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias kocd='kubectl describe po --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koexec='kubectl exec -ti --kubeconfig=/root/.kube/admin.conf -n kube-system'
alias koall='kubectl get svc,pods,nodes --all-namespaces -o wide -n kube-system'
alias kodel='kubectl delete --kubeconfig=/root/.kube/admin.conf -n kube-system'
EOF
source ~/.bashrc
安装flannel
1
# Apply the flannel manifest (spelled out: `koct` is the `kubectl create`
# alias defined above and does not expand in non-interactive shells).
# Fixed: stray space after `--kubeconfig=`.
kubectl create -f https://gitee.com/YunFeiGuoJi/Cnblog/raw/master/kubernetes/yml/kube-flannel.yml --kubeconfig=/root/.kube/admin.conf
安装calico
1
# Download the etcd-backed Calico manifest into the current directory.
curl -O https://docs.projectcalico.org/manifests/calico-etcd.yaml
1
2
3
# Fragment of calico-etcd.yaml: point the ConfigMap at the etcd TLS
# material mounted from the calico-etcd-secrets Secret.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
1
2
3
# Base64-encode the etcd CA/cert/key for pasting into the calico-etcd
# Secret (single line, no wrapping).
# NOTE(review): kubeadm generates server.crt/server.key under
# /etc/kubernetes/pki/etcd/ — the original's `service.crt`/`service.key`
# do not exist there; confirm the filenames on the target host.
base64 -w 0 < /etc/kubernetes/pki/etcd/ca.crt > etcd_ca
base64 -w 0 < /etc/kubernetes/pki/etcd/server.crt > etcd_cert
base64 -w 0 < /etc/kubernetes/pki/etcd/server.key > etcd_key
1
2
3
4
# Stop NetworkManager from managing Calico's cali*/tunl* interfaces.
cat > /etc/NetworkManager/conf.d/calico.conf <<'EOF'
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
配置污点
Master允许调度
1
# Remove the master NoSchedule taint so workloads can run on k8s01.
kubectl taint node k8s01 node-role.kubernetes.io/master-
Master 禁止调度
1
# Re-apply the master NoSchedule taint.
# Fixed: spaces around `=`/`:` in the original split the taint spec into
# separate arguments.
# NOTE(review): the node name here (localhost.localdomain) differs from
# `k8s01` used above — confirm which node is intended.
kubectl taint node localhost.localdomain node-role.kubernetes.io/master="":NoSchedule
kubectl 安装使用
1
2
3
4
# Download the latest stable kubectl release.
# Fixed: stray spaces inside the quoted URL around `$(...)` produced a
# broken URL in the original.
curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
# Or pin a specific version:
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kubectl
curl -LO https://dl.k8s.io/release/v1.18.6/bin/linux/amd64/kubectl
Helm 安装使用
安装
1
# Install Helm v3.3.3 from the release tarball onto PATH.
wget https://get.helm.sh/helm-v3.3.3-linux-amd64.tar.gz \
  && tar zxvf helm-v3.3.3-linux-amd64.tar.gz \
  && mv linux-amd64/helm /usr/bin
使用
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# 添加repo
helm repo add elastic https://helm.elastic.co
helm repo add gitlab https://charts.gitlab.io
helm repo add harbor https://helm.goharbor.io
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
helm repo add stable https://charts.helm.sh/stable
helm repo add aliyuncs https://apphub.aliyuncs.com
helm repo add traefik https://containous.github.io/traefik-helm-chart
helm repo add loki https://grafana.github.io/loki/charts
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm repo add jaegertractracing https://jaegertracing.github.io/helm-charts
helm repo update
kubernetes 维护管理
kubernetes 集群访问
通过kubectl config访问
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
# Write a minimal kubeconfig pointing at a remote minikube API server.
# Fixes vs. original: `$HOME /.kube/config` had a stray space (redirect
# targeted $HOME itself), and the YAML had lost all indentation.
# NOTE(review): TLS verification is disabled and the client cert/key
# paths must exist locally — confirm both before use.
mkdir -p "$HOME/.kube"
cat > "$HOME/.kube/config" <<'EOF'
apiVersion: v1
clusters:
- cluster:
    #certificate-authority: /Users/admin/.minikube/ca.crt
    insecure-skip-tls-verify: true
    server: https://47.243.34.122:8443
  name: minikube
contexts:
- context:
    cluster: minikube
    namespace: default
    user: minikube
  name: minikube
current-context: minikube
kind: Config
preferences: {}
users:
- name: minikube
  user:
    client-certificate: /Users/admin/.minikube/client.crt
    client-key: /Users/admin/.minikube/client.key
EOF
通过token访问
查看所有的集群,因为你的 .kubeconfig 文件中可能包含多个上下文
1
# Access the API server with a bearer token.
# Fixed throughout: the original had spaces around `=` in assignments
# (`APISERVER = $(...)` runs APISERVER as a command) and after `-o
# jsonpath`, plus spaces splitting `$APISERVER /api` into two arguments.
# List every cluster defined in the kubeconfig (name + server URL).
kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}'
# Pick the cluster to talk to.
export CLUSTER_NAME="kubernetes"
# Resolve its API-server endpoint.
APISERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name==\"$CLUSTER_NAME\")].cluster.server}")
# Grab the default service account's token.
TOKEN=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}" | base64 -d)
# Probe the API with the bearer token.
curl -X GET "$APISERVER/api" --header "Authorization: Bearer $TOKEN" --insecure
# Compact variant of the same steps:
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
TOKEN=$(kubectl get secret "$(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}')" -o jsonpath='{.data.token}' | base64 --decode)
curl "$APISERVER/api" --header "Authorization: Bearer $TOKEN" --insecure
通过serviceaccount来访问
1
# Service account used by external tooling to reach the API server.
kubectl create serviceaccount kubernetes-devops
创建ClusterRole、RoleBinding
1
2
3
4
5
6
7
8
# Read-only ClusterRole for the kubernetes-devops service account:
# pods, services and pod logs.
# Fixed: indentation restored (it was stripped in the original) and
# apiVersion bumped — rbac v1beta1 was removed in Kubernetes 1.22.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-devops
rules:
- apiGroups: [ "" ] # "" indicates the core API group
  resources: [ "pods" , "services" , "pods/log" ]
  verbs: [ "get" , "watch" , "list" ]
1
# Bind the ClusterRole to the service account, then extract its token
# and the cluster CA for out-of-cluster access.
# Fixes vs. original: `--serviceaccount` requires <namespace>:<name>;
# `VAR = $(...)` assignments had spaces; `${ VAR }` with inner spaces is
# a bash "bad substitution" error.
kubectl create rolebinding kubernetes-devops-read --clusterrole kubernetes-devops --serviceaccount default:kubernetes-devops -n default
# Find the token secret backing the service account ($SERVICE_ACCOUNT
# must be set by the caller).
SECRET=$(kubectl get serviceaccount "${SERVICE_ACCOUNT}" -o json \
  | jq -Mr '.secrets[].name | select(contains("token"))')
# Decode the bearer token.
TOKEN=$(kubectl get secret "${SECRET}" -o json | jq -Mr '.data.token' | base64 -d)
# Extract the cluster CA certificate for TLS verification.
kubectl get secret "${SECRET}" -o json | jq -Mr '.data["ca.crt"]' \
  | base64 -d > /tmp/ca.crt
获取API Server URL,如果API Server部署在多台Master上,只需访问其中一台即可。
1
2
# Resolve the API server address (first endpoint of the `kubernetes`
# service) and list pods with the extracted token + CA.
# Fixed: spaces in `APISERVER = ...` and in `$APISERVER /api/...`.
APISERVER=https://$(kubectl -n default get endpoints kubernetes --no-headers \
  | awk '{ print $2 }' | cut -d "," -f 1)
# Replace {namespace} with the target namespace before running.
curl -s "$APISERVER/api/v1/namespaces/{namespace}/pods/" \
  --header "Authorization: Bearer $TOKEN" --cacert /tmp/ca.crt
通过useraccount 访问api server
托管版本
使用云厂商创建子账号,赋予rbac权限
自建版本
1
# Create an x509 user account `devops`, sign it with the cluster CA and
# wire it into kubectl.
# Fixes vs. original: the x509 step referenced `wolken.csr`, which was
# never created (the CSR is devops-csr.pem); set-credentials registered
# user `wolken` while set-context referenced user `devops`; spaces after
# `=` split option values; backticks replaced with $(...).
openssl genrsa -out devops.key 2048
# CSR: CN = user name, O = group (repeatable).
openssl req -new -key devops.key -out devops-csr.pem -subj "/CN=devops/O=dev/O=test"
# Sign the CSR with the cluster CA, valid for 10 years.
openssl x509 -req -in devops-csr.pem -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out devops.crt -days 3650
# Register the credentials under the same user name the context uses.
kubectl config set-credentials devops --client-certificate-data=$(base64 --wrap=0 < devops.crt) --client-key-data=$(base64 --wrap=0 < devops.key)
# Context tying user, cluster and namespace together.
kubectl config set-context devops-context --cluster=kubernetes --namespace=test --user=devops
# Smoke test.
kubectl --context=devops-context get po
通过pod内部访问
1
# In-cluster API access using the service-account credentials mounted
# into every pod.
# Fixed: `VAR = value` assignments and `${ VAR }` inner spaces from the
# original are invalid shell.
APISERVER=https://kubernetes.default.svc
SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount
NAMESPACE=$(cat "${SERVICEACCOUNT}/namespace")
TOKEN=$(cat "${SERVICEACCOUNT}/token")
CACERT=${SERVICEACCOUNT}/ca.crt
curl --cacert "${CACERT}" --header "Authorization: Bearer ${TOKEN}" -X GET "${APISERVER}/api"