KubeSphere Multi-Node Installation
Machine Preparation
- 4 vCPU / 8 GB RAM × 1 (master)
- 8 vCPU / 16 GB RAM × 2 (worker)
- CentOS 7.9
- All nodes reachable over the internal network
- Each machine has its own resolvable hostname
- Firewall allows ports 30000-32767 (the Kubernetes NodePort range); see the sketch after this list
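If firewalld is enabled on CentOS 7 (an assumption; skip this if the nodes use a different firewall or cloud security groups), the NodePort range can be opened on every node roughly like this:
# Open the Kubernetes NodePort range on each node
sudo firewall-cmd --permanent --add-port=30000-32767/tcp
sudo firewall-cmd --reload
# Verify the rule
sudo firewall-cmd --list-ports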
Prerequisites
Install Docker
# Remove old Docker versions
sudo yum remove docker*
# Install yum-utils
sudo yum install -y yum-utils
# Configure the Docker yum repository
sudo yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install a specific Docker version
sudo yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6
# Start Docker now and enable it on boot
systemctl enable docker --now
# Configure a Docker registry mirror (note: the Aliyun mirror is missing some images)
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://tyl4mf91.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
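As an optional sanity check, docker info should now report the systemd cgroup driver and the configured registry mirror:
# Verify the daemon picked up the new settings
docker info | grep -i 'cgroup driver'
docker info | grep -A1 -i 'registry mirrors'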
Kubernetes Dependencies
yum install -y conntrack socat
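Optionally confirm both binaries are available on every node:
# Both commands should print a path
command -v conntrack socat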
Set Up the Basic Environment
# Set each machine's own hostname (klaus-1 / klaus-2 / klaus-3, matching the config below)
hostnamectl set-hostname klaus-1
# View the node's hostname information
hostnamectl
# Set SELinux to permissive mode
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Allow iptables to see bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
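If the node hostnames are not resolvable via DNS, a minimal /etc/hosts sketch helps (the names and IPs below match the config-sample.yaml later in this guide; adjust them to your own environment):
# Append hostname mappings on every node
cat <<EOF | sudo tee -a /etc/hosts
192.168.200.80 klaus-1
192.168.200.75 klaus-2
192.168.200.73 klaus-3
EOF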
Create a Cluster with KubeKey
Download KubeKey
## Run the following on the master node
# Set the KubeKey download region
export KKZONE=cn
# Download KubeKey
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
## If you transfer the downloaded KubeKey binary to a new machine whose access to Googleapis is also restricted, be sure to run export KKZONE=cn again before executing the following steps.
# Make kk executable
chmod +x kk
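A quick check that the binary runs and is the expected release (assuming the download above succeeded):
./kk version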
Create the Cluster Configuration File
./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
Modify config-sample.yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: klaus-1, address: 192.168.200.80, internalAddress: 192.168.200.80, user: root, password: c$YycswkQa9Q}
  - {name: klaus-2, address: 192.168.200.75, internalAddress: 192.168.200.75, user: root, password: c$YycswkQa9Q}
  - {name: klaus-3, address: 192.168.200.73, internalAddress: 192.168.200.73, user: root, password: c$YycswkQa9Q}
  roleGroups:
    etcd:
    - klaus-1
    master:
    - klaus-1
    worker:
    - klaus-2
    - klaus-3
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    registryMirrors: []
    insecureRegistries: []
  addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    redis:
      enabled: false
    redisVolumSize: 2Gi
    openldap:
      enabled: false
    openldapVolumeSize: 2Gi
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""
  console:
    enableMultiLogin: true
    port: 30880
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
  devops:
    enabled: false
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: false
    ruler:
      enabled: true
      replicas: 2
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi
    prometheusVolumeSize: 20Gi
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: false
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
Create the Cluster
./kk create cluster -f config-sample.yaml
Check the Installation Progress
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
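When the installer log reports a successful installation, the cluster can be verified roughly as follows (a sketch; the console port 30880 comes from the config above, and admin / P@88w0rd is KubeSphere's default account):
# All nodes should be Ready
kubectl get nodes
# All pods should eventually reach Running
kubectl get pods --all-namespaces
# Console: http://<any-node-IP>:30880, e.g. http://192.168.200.80:30880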