目录

kubernetes安装(dockershim运行时)

系统初始化

环境

主机名 ip
kube-master 192.168.253.10
kube-node01 192.168.253.20
kube-node02 192.168.253.21

设置系统主机名以及hosts文件的相互解析

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
kube-master机器的操作
hostnamectl set-hostname kube-master
cat >> /etc/hosts << EOF
192.168.253.10 kube-master
192.168.253.20 kube-node01
192.168.253.21 kube-node02
EOF
scp /etc/hosts kube-node01:/etc/
scp /etc/hosts kube-node02:/etc/

kube-node01机器的操作
hostnamectl set-hostname kube-node01

kube-node02机器的操作
hostnamectl set-hostname kube-node02

安装相关依赖包

1
2
kube-master kube-node01 kube-node02都需要操作
yum -y install conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

设置防火墙为iptables并设置空规则

1
2
3
kube-master kube-node01 kube-node02都需要操作
systemctl stop   firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

关闭SELINUX以及虚拟内存

1
2
3
kube-master kube-node01 kube-node02都需要操作
swapoff -a &&  sed -i '/swap/ d ' /etc/fstab
setenforce 0 && sed -ri 's/^(SELINUX=).*/\1disabled/' /etc/selinux/config

调整内核参数,对于K8S

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
kube-master kube-node01 kube-node02都需要操作
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# 禁止使用swap空间,只有当系统OOM时才允许使用它
vm.swappiness=0
# 不检查物理内存是否够用
vm.overcommit_memory=1
# 开启OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
出现这个先暂时不用理会,需要升级内核

调整系统时区

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
设置系统时区为中国/上海
timedatectl  set-timezone Asia/Shanghai
ntp时间同步
ntpdate ntp.api.bz
将时间同步写入计划任务,每5分钟同步一次
[root@con-mas ~]# crontab -l
*/5 * * * * /usr/sbin/ntpdate ntp.api.bz

将当前utc时间写入硬件时钟
timedatectl set-local-rtc 0

重启依赖于系统时间的服务
systemctl restart rsyslog
systemctl restart crond

关闭系统不需要的服务

1
systemctl stop postfix && systemctl disable postfix

设置rsyslogd和systemd journald

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
mkdir -p /var/log/journal #持久化保存日志的目录
mkdir -p /etc/systemd/journald.conf.d
cat >  /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
#持久化保存到磁盘
Storage=persistent

#压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

#最大占用空间10G
SystemMaxUse=10G

#单日志文件最大 200M
SystemMaxFileSize=200M

#日志保存时间2周
MaxRetentionSec=2week

#不将日志转发到syslog
ForwardToSyslog=no
EOF

systemctl restart systemd-journald

升级系统内核

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
Centos7.x系统自带的3.10内核存在一些bug,导致运行的Docker/kubernetes不稳定
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

添加repository 后, 列出可以使用的kernel包版本
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

内核版本介绍:
lt:longterm的缩写:长期维护版;
ml:mainline的缩写:最新稳定版;

#安装完成后检查/boot/grub2/grub.cfg中对应内核menuentry中是否包含initrd16配置,
如果没有,再安装一次!
yum --enablerepo=elrepo-kernel install -y kernel-lt


#设置开机从新内核启动
grub2-set-default "$(egrep 'CentOS Linux (.*)' /boot/grub2/grub.cfg | sort -k 4 -rn | head -1 | cut -d \' -f 2)" && reboot

kubernetes部署安装

kube-proxy开启ipvs的前置条件

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules  && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

安装docker软件

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
yum -y install yum-utils device-mapper-persistent-data lvm2

yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce
#创建 /etc/docker/目录
mkdir /etc/docker
#配置daemon文件
cat > /etc/docker/daemon.json <<EOF
{
    "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    },
     "insecure-registries": ["https://har.gtmsh.com"]
}
EOF

mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

#如果需要更改docker的Root Dir目录,可以做这几步操作
systemctl stop docker
[root@kube-masterthr k8s_image]# mkdir /data/tools/docker -p
[root@kube-masterone ~]# mv /var/lib/docker /data/tools/docker
[root@kube-mastertwo data]# ln -s /data/tools/docker/ /var/lib/docker 
systemctl start docker

kubeadm 配置(所有节点)

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum -y install kubectl-1.15.1
yum -y install  kubelet-1.15.1 
yum -y install kubeadm-1.15.1
systemctl enable kubelet

初始化主节点

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
mkdir -p /var/kubernetes
mv * /var/kubernetes/
cd /var/kubernetes/
mkdir config
cd !$

kubeadm config print init-defaults > kubeadm-config.yaml
vi  kubeadm-config.yaml 

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint: 
  advertiseAddress: 192.168.253.10 #需要修改
  bindPort: 6443

nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kube-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
#controlPlaneEndpoint: "192.168.253.10:6443" #集群部署,注意如果是做高可用集群,这里的ip不能是集群内的地址,必须要跟集群同网段内的一个ip地址,否则会初始化的时候会报错
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd

imageRepository: registry.aliyuncs.com/google_containers #可以更改镜像仓库地址为阿里云registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.15.1 #需要修改
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 #需要添加
  serviceSubnet: 10.96.0.0/12
scheduler: {}

--- #下面都需要添加
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

如果是后期更改ipvs可以通过如下方式
 kubectl edit  configmap -n kube-system  kube-proxy
 mode: 'ipvs'
 然后重启pod
 kubectl delete pod -n kube-system kube-proxy-{4zxk6,7slxt,k8n5v,x9kxj}
 
#注意1.15版本以上没有 --experimental-upload-certs,需要替换为 --upload-certs
kubeadm init --config=kubeadm-config.yaml  --upload-certs | tee kubeadm-init.log
#如果初始化失败,可以通过以下查看日志
journalctl -u kubelet

将其余工作节点加入集群

1
2
3
node01 node02
kubeadm join 192.168.253.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:69fc060e3f9dd4a9badc74e7d67fba1ff7fc70ee84e51206d1100996fd129a93

部署网络

1
2
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@kube-master config]# kubectl apply -f kube-flannel.yml

部署后注意事项

k8s组件的默认时间为UTC时间,所以用cronjob的时候会晚8个小时,这里可以在部署完成之后提前修改配置文件将组件时区修改为Shanghai时区。

修改 etcd组件的yaml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
[root@kube-masterone manifests]# cat etcd.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://10.10.60.10:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --initial-advertise-peer-urls=https://10.10.60.10:2380
    - --initial-cluster=kube-masterone=https://10.10.60.10:2380
    #- --initial-cluster-state=new
    #- --force-new-cluster
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://10.10.60.10:2379
    - --listen-peer-urls=https://10.10.60.10:2380
    - --name=kube-masterone
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.3.10
    imagePullPolicy: IfNotPresent
    livenessProbe:
      exec:
        command:
        - /bin/sh
        - -ec
        - ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
          --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
          get foo
      failureThreshold: 8
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
    - name: host-time    #这一段是需要添加的
      mountPath: /etc/localtime
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
  - hostPath:     #这一段是需要添加的
      path: /etc/localtime
    name: host-time
status: {}

修改apiserver组件的yaml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
[root@kube-masterone manifests]# cat kube-apiserver.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=10.10.60.10
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: k8s.gcr.io/kube-apiserver:v1.15.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 10.10.60.10
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/localtime #这一段是需要添加的
      name: host-time
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - name: host-time #这一段是需要添加的
    hostPath:
      path: /etc/localtime
status: {}

修改controller组件的yaml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
[root@kube-masterone manifests]# cat kube-controller-manager.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.244.0.0/16
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-cidr-mask-size=24
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --use-service-account-credentials=true
    image: k8s.gcr.io/kube-controller-manager:v1.15.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
    - mountPath: /etc/localtime #这一段是需要添加的
      name: host-time
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath: 
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
  - hostPath: #这一段是需要添加的
      path: /etc/localtime
    name: host-time
status: {}

修改scheduler组件的yaml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
[root@kube-masterone manifests]# cat kube-scheduler.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  #volumes:
  #  - name: config
  #     hostPath:
  #         path: /etc/localtime
  containers:
  #  volumeMounts:
  #  -  name: config
  #     mountPath: /etc/localtime
  #     readOnly: true
  - command:
    - kube-scheduler
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    image: k8s.gcr.io/kube-scheduler:v1.15.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
    - mountPath: /etc/localtime  #这一段是需要添加的
      name: host-time
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
  - hostPath:  #这一段是需要添加的
      path: /etc/localtime
    name: host-time
status: {}

重启kubelet

1
systemctl restart kubelet