@@ -73,7 +73,7 @@ reboot
```

# 1. Create a directory to hold the installation packages

-mkdir -p /opt/package/
+mkdir -p /opt/package/docker

# 2. Upload the docker folder from the installation package into the directory above

@@ -112,7 +112,11 @@ docker-compose --version

# 3. Install Harbor and its configuration (only one VM needs this)

## 3.1 Install Harbor

-1. Upload the archive harbor-offline-installer-v2.3.2.tgz to the ```/opt/package/``` directory
+1. Upload the archive harbor-offline-installer-v2.3.2.tgz to the following directory:
+```
+/opt/package/
+```

2. Extract the archive
```
tar xf harbor-offline-installer-v2.3.2.tgz
@@ -121,55 +125,45 @@ tar xf harbor-offline-installer-v2.3.2.tgz

First, make a working copy of the configuration template:
```
-# cp harbor.yml.tmpl harbor.yml //copy the template contents into harbor.yml (the installer only reads harbor.yml)
-# mkdir -p /opt/application/harbor //directory for Harbor's persistent data
+# Copy the template contents into harbor.yml (the installer only reads harbor.yml)
+cp harbor.yml.tmpl harbor.yml
+# Directory for Harbor's persistent data
+mkdir -p /home/mkcloud/software/harbor/data
+
+# Directory for Harbor's logs
+mkdir -p /home/mkcloud/software/harbor/log
```

Next, edit the configuration in harbor.yml:

```
-# Set the access address: an IP or domain name may be used, but not 127.0.0.1 or localhost. By default Harbor listens on port 80; with a custom port, besides changing docker-compose.yml, the hostname here must also include that port, otherwise docker login and push will fail
-hostname: 192.168.0.8:9999
-# http config
-http:
-# port for http, default is 80. If https enabled, this port will redirect to https port
-port: 9999
+# Change this to the local host's IP
+hostname: 10.168.59.60

-# https config (comment it out if not needed)
+# http related config
+http:
+  port: 80
+# Comment out the entire https block below
# https related config
# https:
-# https port for harbor, default is 443
-# port: 443
-# The path of cert and key files for nginx
-# certificate: /your/certificate/path
-# private_key: /your/private/key/path
-
-# external_url: https://reg.mydomain.com:8433
-# To put Harbor behind an external proxy (an outer NGINX, LB, etc.), uncomment external_url; once it is enabled, hostname is no longer used.
-# admin password
-harbor_admin_password: Harbor12345
-
-# database config
-database:
-# The password for the root user of Harbor DB. Change this before any production use.
-password: root123
-# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
-max_idle_conns: 50
-# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
-# Note: the default number of connections is 100 for postgres.
-max_open_conns: 100
-
-# persistent data directory
-data_volume: /opt/application/harbor
-
+  # https port for harbor, default is 443
+  # port: 443
+  # The path of cert and key files for nginx
+  # certificate: /your/certificate/path
+  # private_key: /your/private/key/path
+
+data_volume: /home/mkcloud/software/harbor/data # add a directory of your own here
+
+log:
+  location: /home/mkcloud/software/harbor/log # add a directory of your own here
```

4. Install and start Harbor

-Make sure you are inside the Harbor installation directory, then run the install.sh script: ``` ./install.sh```
+Make sure you are inside the Harbor installation directory, then run the install.sh script:
+```
+./install.sh
+```
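+
+Once install.sh finishes, a quick sanity check confirms that all Harbor containers came up (a sketch; it assumes docker-compose is on PATH and you are still in the harbor installation directory):
+```
+# every service should show a state of Up (healthy)
+docker-compose ps
+```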

5. Access the Harbor web UI

@@ -198,12 +192,250 @@ EOF
systemctl daemon-reload && systemctl restart docker
```

-Run the command ```docker login server.harbor.com:80```
+Run the command
+```
+docker login server.harbor.com:80
+```

Enter the username: admin

Password: Harbor12345



Harbor is now configured.
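+
+A quick push test confirms the registry accepts images (a sketch; it assumes the login above succeeded and uses Harbor's default library project; any locally loaded image works in place of hello-world):
+```
+docker tag hello-world server.harbor.com:80/library/hello-world:test
+docker push server.harbor.com:80/library/hello-world:test
+```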

+# 4. Offline K8S installation and NFS configuration (all three VMs need this)
+
+## 4.1 Upload kube1.9.0.tar.gz (hereafter kube1.9) to the servers
+
+```
+# 1. Create the directory on all three VMs
+mkdir -p /opt/package/k8s
+# 2. Upload the file to that directory
+scp -r kube1.9.0.tar.gz root@192.168.238.20:/opt/package/k8s
+scp -r kube1.9.0.tar.gz root@192.168.238.21:/opt/package/k8s
+scp -r kube1.9.0.tar.gz root@192.168.238.22:/opt/package/k8s
+```
+
+## 4.2 Extract and install the master and worker nodes
+
+```
+# 1. On master, go to /opt/package/k8s, extract, and run the init script
+tar -zxvf kube1.9.0.tar.gz
+cd kube/shell
+sh init.sh
+
+# 2. Once init.sh has finished, run
+sh master.sh
+
+# 3. On node1 and node2, run the following
+cd /opt/package/k8s
+tar -zxvf kube1.9.0.tar.gz
+cd kube/shell
+sh init.sh
+
+# 4. On master, copy /etc/kubernetes/admin.conf to node1 and node2
+scp -r /etc/kubernetes/admin.conf root@192.168.238.21:/etc/kubernetes
+scp -r /etc/kubernetes/admin.conf root@192.168.238.22:/etc/kubernetes
+```
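+
+For kubectl to work on the worker nodes, the copied admin.conf still has to be exported as the kubeconfig there (a sketch; it assumes a bash login shell on node1 and node2):
+```
+# run on node1 and node2 after step 4 above
+echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bash_profile
+source ~/.bash_profile
+kubectl get nodes   # answers once the nodes have joined (section 4.3)
+```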
+
+## 4.3 Join the worker nodes to the master
+
+ 1. Generate a join token on the master node
+
+ ```shell
+ kubeadm token create --print-join-command
+ ```
+
+ The output looks like this:
+
+ ```shell
+ [root@master shell]# kubeadm token create --print-join-command
+ W1009 17:15:31.782757 37720 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
+ kubeadm join 192.168.0.90:6443 --token ul68zs.dkkvpwfex9rpzo0d --discovery-token-ca-cert-hash sha256:3e3ee481f5603621f216e707321aa26a68834939e440be91322c62eb8540ffce
+ ```
+
+ 2. Run the following on node1 and node2 (note: use the join command generated above, not this literal example)
+
+ ```shell
+ kubeadm join 192.168.0.90:6443 --token ul68zs.dkkvpwfex9rpzo0d --discovery-token-ca-cert-hash sha256:3e3ee481f5603621f216e707321aa26a68834939e440be91322c62eb8540ffce
+ ```
+
+ The result looks like this:
+
+ ```shell
+ [root@node1 shell]# kubeadm join 192.168.0.90:6443 --token ul68zs.dkkvpwfex9rpzo0d --discovery-token-ca-cert-hash sha256:3e3ee481f5603621f216e707321aa26a68834939e440be91322c62eb8540ffce
+ [preflight] Running pre-flight checks
+ [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
+ [WARNING FileExisting-socat]: socat not found in system path
+ [preflight] Reading configuration from the cluster...
+ [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+ [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
+ [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
+ [kubelet-start] Starting the kubelet
+ [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
+
+ This node has joined the cluster:
+ * Certificate signing request was sent to apiserver and a response was received.
+ * The Kubelet was informed of the new secure connection details.
+
+ Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
+ ```
+
+## 4.4 Verify that the cluster pods are Running and the nodes are Ready
+
+Run `watch kubectl get pod -n kube-system -o wide`. The output looks like this:
+
+```shell
+ [root@master shell]# watch kubectl get pod -n kube-system -o wide
+ Every 2.0s: kubectl get pod -n kube-system -o wide                                  Fri Oct  9 17:45:03 2020
+ NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
+ calico-kube-controllers-5d7686f694-94fcc   1/1     Running   0          48m   100.89.161.131   master   <none>           <none>
+ calico-node-42bwj                          1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+ calico-node-k6k6d                          1/1     Running   0          27m   192.168.0.189    node2    <none>           <none>
+ calico-node-lgwwj                          1/1     Running   0          29m   192.168.0.68     node1    <none>           <none>
+ coredns-f9fd979d6-2ncmm                    1/1     Running   0          48m   100.89.161.130   master   <none>           <none>
+ coredns-f9fd979d6-5s4nw                    1/1     Running   0          48m   100.89.161.129   master   <none>           <none>
+ etcd-master                                1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+ kube-apiserver-master                      1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+ kube-controller-manager-master             1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+ kube-proxy-5g2ht                           1/1     Running   0          29m   192.168.0.68     node1    <none>           <none>
+ kube-proxy-wpf76                           1/1     Running   0          27m   192.168.0.189    node2    <none>           <none>
+ kube-proxy-zgcft                           1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+ kube-scheduler-master                      1/1     Running   0          48m   192.168.0.90     master   <none>           <none>
+```
+
+Run `kubectl get nodes`. The output looks like this:
+
+```shell
+ [root@master shell]# kubectl get nodes
+ NAME     STATUS   ROLES    AGE     VERSION
+ master   Ready    master   22m     v1.19.0
+ node1    Ready    <none>   2m17s   v1.19.0
+ node2    Ready    <none>   24s     v1.19.0
+```
+
+## 4.5 Configure NFS
+
+| Node   | Role                    |
+| ------ | ----------------------- |
+| master | NFS server + NFS client |
+| node1  | NFS client              |
+| node2  | NFS client              |
+
+1. Upload the local nfs offline package to the /opt/package/nfs folder on each node
+
+ ```shell
+ scp -r nfs root@192.168.238.20:/opt/package
+ scp -r nfs root@192.168.238.21:/opt/package
+ scp -r nfs root@192.168.238.22:/opt/package
+ ```
+
+2. Install the server (on the master node)
+
+ ```shell
+ # Run the following inside /opt/package/nfs on the master node
+ cd /opt/package/nfs
+ # Install nfs-utils
+ rpm -ivh nfs-utils-1.3.0-0.68.el7.2.x86_64.rpm
+ # Create /etc/exports with the following export entry
+ echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
+ # Create the shared directory
+ mkdir -p /nfs/data
+ # Enable and start the nfs services
+ systemctl enable rpcbind
+ systemctl enable nfs-server
+ systemctl start rpcbind
+ systemctl start nfs-server
+ exportfs -r
+ # Check that the export took effect
+ exportfs
+ # Expected output:
+ /nfs/data *
+ ```
+
+3. Install the NFS client (node1 and node2; master already has nfs-utils from step 2)
+```shell
+# Run the following inside /opt/package/nfs on node1 and node2
+cd /opt/package/nfs
+rpm -ivh nfs-utils-1.3.0-0.68.el7.2.x86_64.rpm
+systemctl start nfs && systemctl enable nfs
+```
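+
+A quick check from a client confirms the export is reachable (a sketch; 192.168.238.20 is the master/NFS server from the table above, and /mnt/nfs-test is an arbitrary mount point):
+```shell
+# list the exports published by the server
+showmount -e 192.168.238.20
+# optional manual mount test
+mkdir -p /mnt/nfs-test
+mount -t nfs 192.168.238.20:/nfs/data /mnt/nfs-test
+umount /mnt/nfs-test
+```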
+
+4. Install the NFS provisioner in K8S (any K8S node; master is used here)
+```shell
+# 1. Go to the /opt/package/nfs directory
+cd /opt/package/nfs
+# 2. Load the docker image
+docker load < nfs-client-provisioner.tar.gz
+# 3. Edit the deployment.yaml file
+vim /opt/package/nfs/deployment.yaml
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nfs-client-provisioner
+  labels:
+    app: nfs-client-provisioner
+  # replace with namespace where provisioner is deployed
+  namespace: default
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: nfs-client-provisioner
+  template:
+    metadata:
+      labels:
+        app: nfs-client-provisioner
+    spec:
+      serviceAccountName: nfs-client-provisioner
+      containers:
+        - name: nfs-client-provisioner
+          image: quay.io/external_storage/nfs-client-provisioner:latest ## defaults to the latest tag
+          volumeMounts:
+            - name: nfs-client-root
+              mountPath: /persistentvolumes
+          env:
+            - name: PROVISIONER_NAME
+              value: fuseim.pri/ifs
+            - name: NFS_SERVER
+              value: 192.168.238.20 ## IP address of the NFS server
+            - name: NFS_PATH
+              value: /nfs/data ## shared directory exported by the NFS server (note: this must be the innermost directory of the path, otherwise deployed apps lack permission to create directories and stay Pending)
+      volumes:
+        - name: nfs-client-root
+          nfs:
+            server: 192.168.238.20 ## IP address of the NFS server
+            path: /nfs/data ## shared directory exported by the NFS server (same caveat as NFS_PATH above)
+
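+# Note: the `kubectl apply -f .` below assumes rbac.yaml and class.yaml from the
+# nfs-client-provisioner package sit in the same directory. A minimal class.yaml
+# sketch (assumption: its provisioner field must match PROVISIONER_NAME above):
+#
+# apiVersion: storage.k8s.io/v1
+# kind: StorageClass
+# metadata:
+#   name: managed-nfs-storage
+# provisioner: fuseim.pri/ifs
+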
+# 4. Apply the yaml files
+[root@k8s-client nfs]# kubectl apply -f .
+
+# 5. Check the provisioner pod
+[root@k8s-client nfs]# kubectl get pods
+NAME                                     READY   STATUS    RESTARTS   AGE
+nfs-client-provisioner-78697f488-5p52r   1/1     Running   0          16h
+
+# 6. List the StorageClasses in the cluster
+[root@k8s-client nfs]# kubectl get storageclass
+NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  16h
+
+# 7. Mark a StorageClass as the default (use the name of the StorageClass you deployed)
+[root@k8s-client nfs]# kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+storageclass.storage.k8s.io/managed-nfs-storage patched
+
+# 8. Verify the StorageClass you chose is now the default
+[root@k8s-client nfs]# kubectl get storageclass
+NAME                            PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+managed-nfs-storage (default)   fuseim.pri/ifs   Delete          Immediate           false
+```
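+
+To verify dynamic provisioning end to end, a throwaway PVC can be created against the new default StorageClass (a sketch; the claim name nfs-test-claim is arbitrary):
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nfs-test-claim
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Mi
+EOF
+kubectl get pvc nfs-test-claim   # STATUS should turn Bound within seconds
+kubectl delete pvc nfs-test-claim
+```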
+
# 5. Install KubeSphere and its plugins

## 5.1 Upload the offline packages to the Harbor registry (this is done on the master node only)

@@ -303,7 +535,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=

| Software       | Version |
| -------------- | ------- |
-| centos         | 7.5     |
+| centos         | 7.7     |
| docker         | 19.03.7 |
| docker-compose | 2.1.0   |
| Harbor         | 2.3.2   |