Host planning
Installation steps
FROM openjdk:8
WORKDIR /usr/local
ADD apache-zookeeper-3.8.0-bin.tar.gz /usr/local/
# Set the container time zone to UTC+8 (Asia/Shanghai) so the application is not 8 hours off from Beijing time
ENV TZ=Asia/Shanghai
# The next line can be removed; the same thing can be done by setting command in the pod spec
CMD /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh start-foreground
Docker
[root@node01 ~]# ls -rlt
total 12888
-rw-------. 1 root root 1698 Mar 28 16:37 anaconda-ks.cfg
-rw-r--r-- 1 root root 13185104 Apr 27 14:34 apache-zookeeper-3.8.0-bin.tar.gz
-rw-r--r-- 1 root root 226 Apr 27 18:05 dockerfile
# Build an image named myzk:1.0
[root@node01 ~]# docker build -f dockerfile -t myzk:1.0 .
Bash
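Because the StatefulSet below uses imagePullPolicy: IfNotPresent and no registry is involved, the image must already exist locally on every node that may run a ZooKeeper pod. A minimal sketch for verifying the build and copying the image to another node (node02 is just a placeholder for the other cluster nodes):
# Confirm the image was built and that ADD unpacked the archive where the start command expects it
docker images myzk:1.0
docker run --rm myzk:1.0 ls /usr/local/apache-zookeeper-3.8.0-bin/bin
# With no private registry, copy the image to the other nodes so IfNotPresent can find it there
docker save myzk:1.0 | ssh node02 docker load
Bash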
# Install the NFS service on the machine that acts as the NFS server, and enable it at boot
[root@nfs ~]# yum install nfs-utils -y
[root@nfs ~]# systemctl restart nfs
[root@nfs ~]# systemctl enable nfs
# Also install nfs-utils on every k8s node; note that the service does not need to be started there
[root@k8s-master01 ~]# yum install nfs-utils -y
Bash
# Create the shared directories
for x in $(seq 1 3);
> do
> mkdir -p /data/pv/zk${x}
> done
# Export the shared directories; to expose them only to specific hosts, replace the "192.168.13.0/24" network below with those hosts' IPs
[root@master ~]# vim /etc/exports
/data/pv/zk1 192.168.13.0/24(rw,no_root_squash)
/data/pv/zk2 192.168.13.0/24(rw,no_root_squash)
/data/pv/zk3 192.168.13.0/24(rw,no_root_squash)
Bash
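After editing /etc/exports, the new exports have to be loaded and are worth verifying. A minimal sketch, run on the NFS server (192.168.13.129 in this setup):
# Reload /etc/exports without restarting the service, then confirm what is actually exported
[root@master ~]# exportfs -ra
[root@master ~]# showmount -e 192.168.13.129
# Optional: test-mount one export from a k8s node and unmount it again
[root@node01 ~]# mount -t nfs 192.168.13.129:/data/pv/zk1 /mnt && umount /mnt
Bash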
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk1
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk1
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk2
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk2
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk3
  namespace: zookeeper
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: "zookeeper"
  nfs:
    path: /data/pv/zk3
    server: 192.168.13.129
  persistentVolumeReclaimPolicy: Recycle
YAML
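Note that these manifests reference a zookeeper namespace, and its creation is not shown above. If it does not exist yet, create it before applying; PersistentVolumes themselves are cluster-scoped, but the Services, ConfigMap and StatefulSet further down are namespaced:
[root@master ~]# kubectl create namespace zookeeper
Bash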
[root@master ~]# kubectl apply -f pv-zk.yaml
# Once created, the following 3 PVs can be seen with kubectl
[root@master ~]# kubectl get pv -n zookeeper
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
k8s-pv-zk1 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-1 zookeeper 17m
k8s-pv-zk2 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-0 zookeeper 17m
k8s-pv-zk3 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-2 zookeeper 17m
Bash
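The Bound status and CLAIM column above only appear once the StatefulSet below has created its PersistentVolumeClaims through volumeClaimTemplates; binding is matched on the "zookeeper" storageClassName, the ReadWriteOnce access mode and the 2Gi request. A quick way to inspect the claims (a sketch):
# Claims are named <template-name>-<statefulset-name>-<ordinal>, e.g. data-zookeeper-cluster-0
[root@master ~]# kubectl get pvc -n zookeeper
[root@master ~]# kubectl describe pv k8s-pv-zk1 | grep -E 'Claim|StorageClass'
Bash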
apiVersion: v1
kind: Service # Headless service used for communication inside the zk ensemble
metadata:
  name: zk-hs
  namespace: zookeeper
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service # Service used for client access to the zk ensemble
metadata:
  name: zk-cs
  namespace: zookeeper
  labels:
    app: zk
spec:
  type: ClusterIP # Only reachable from inside the k8s cluster; change to NodePort if external machines or a browser need access
  ports:
    - port: 2181
      name: client
  selector:
    app: zk
---
apiVersion: v1
kind: ConfigMap # Holds the zk configuration file
metadata:
  name: zoo-conf
  namespace: zookeeper
data:
  zoo.cfg: |+
    # The number of milliseconds of each tick
    tickTime=2000
    # The number of ticks that the initial
    # synchronization phase can take
    initLimit=10
    # The number of ticks that can pass between
    # sending a request and getting an acknowledgement
    syncLimit=5
    # the directory where the snapshot is stored.
    # do not use /tmp for storage, /tmp here is just
    # example sakes.
    dataDir=/usr/local/apache-zookeeper-3.8.0-bin/data
    dataLogDir=/usr/local/apache-zookeeper-3.8.0-bin/data/log
    # the port at which the clients will connect
    clientPort=2181
    # the maximum number of client connections.
    # increase this if you need to handle more clients
    #maxClientCnxns=60
    #
    # Be sure to read the maintenance section of the
    # administrator guide before turning on autopurge.
    #
    # https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
    #
    # The number of snapshots to retain in dataDir
    #autopurge.snapRetainCount=3
    # Purge task interval in hours
    # Set to "0" to disable auto purge feature
    #autopurge.purgeInterval=1
    ## Metrics Providers
    #
    # https://prometheus.io Metrics Exporter
    #metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
    #metricsProvider.httpHost=0.0.0.0
    #metricsProvider.httpPort=7000
    #metricsProvider.exportJvmInfo=true
    # Pod IPs change after a restart, so DNS names are used instead of IPs, in the form "<pod name>.<headless service name>.<namespace>.svc.cluster.local"
    server.1=zookeeper-cluster-0.zk-hs.zookeeper.svc.cluster.local:2888:3888
    server.2=zookeeper-cluster-1.zk-hs.zookeeper.svc.cluster.local:2888:3888
    server.3=zookeeper-cluster-2.zk-hs.zookeeper.svc.cluster.local:2888:3888
---
apiVersion: apps/v1
kind: StatefulSet # Pod IPs change on every restart, so a StatefulSet keeps the pod names stable and lets DNS names replace IPs
metadata:
  name: zookeeper-cluster
  namespace: zookeeper
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
    spec:
      containers:
        - name: zookeeper
          imagePullPolicy: IfNotPresent
          image: "myzk:1.0"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          volumeMounts:
            - name: conf
              mountPath: /usr/local/apache-zookeeper-3.8.0-bin/conf
            - name: data
              mountPath: /usr/local/apache-zookeeper-3.8.0-bin/data
          command: [ "/usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh", "start-foreground" ] # This step could also be done with CMD in the dockerfile
      volumes:
        - name: conf
          configMap:
            name: zoo-conf
            defaultMode: 0755
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2Gi
        storageClassName: "zookeeper"
YAML
[root@master ~]# kubectl apply -f zk-cluster.yaml
# Check the pods
[root@master ~]# kubectl get pods -n zookeeper
NAME READY STATUS RESTARTS AGE
zookeeper-cluster-0 0/1 CrashLoopBackOff 1 17s
Bash
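Before reading the pod logs, it can help to confirm that the supporting objects from the manifest were actually created (a sketch; output omitted):
[root@master ~]# kubectl get svc,configmap,pvc -n zookeeper
[root@master ~]# kubectl describe pod zookeeper-cluster-0 -n zookeeper
Bash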
[root@master ~]# kubectl logs zookeeper-cluster-0 -n zookeeper
21:20:18.807 [main] ERROR org.apache.zookeeper.server.quorum.QuorumPeerMain - Invalid config, exiting abnormally
org.apache.zookeeper.server.quorum.QuorumPeerConfig$ConfigException: Error processing /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parse(QuorumPeerConfig.java:198)
at org.apache.zookeeper.server.quorum.QuorumPeerMain.initializeAndRun(QuorumPeerMain.java:125)
at org.apache.zookeeper.server.quorum.QuorumPeerMain.main(QuorumPeerMain.java:91)
Caused by: java.lang.IllegalArgumentException: myid file is missing
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.checkValidity(QuorumPeerConfig.java:792)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.setupQuorumPeerConfig(QuorumPeerConfig.java:663)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parseProperties(QuorumPeerConfig.java:487)
at org.apache.zookeeper.server.quorum.QuorumPeerConfig.parse(QuorumPeerConfig.java:194)
... 2 common frames omitted
Bash
[root@master ~]# kubectl get pv -n zookeeper
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
k8s-pv-zk1 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-1 zookeeper 38m
k8s-pv-zk2 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-0 zookeeper 38m
k8s-pv-zk3 2Gi RWO Recycle Bound zookeeper/data-zookeeper-cluster-2 zookeeper 38m
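# Each server needs a myid file in its dataDir whose number matches its server.N entry in zoo.cfg.
# The CLAIM column above shows which pod uses each PV, so write the matching id into each export directory:
# k8s-pv-zk1 -> data-zookeeper-cluster-1 -> myid 2; k8s-pv-zk2 -> data-zookeeper-cluster-0 -> myid 1; k8s-pv-zk3 -> data-zookeeper-cluster-2 -> myid 3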
[root@master ~]# echo 2 > /data/pv/zk1/myid
[root@master ~]# echo 1 > /data/pv/zk2/myid
[root@master ~]# echo 3 > /data/pv/zk3/myid
Bash
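Writing myid by hand on the NFS server works, but it has to be repeated whenever the volumes are recreated. A common alternative, shown here only as a sketch (not what this article's image does), is to derive myid from the StatefulSet ordinal inside the container command, since the pod hostname ends with the ordinal:
# Hypothetical replacement for the container command in the StatefulSet:
# generate myid from the pod ordinal (zookeeper-cluster-0 -> 1, -1 -> 2, -2 -> 3), then start zk in the foreground
ZK_HOME=/usr/local/apache-zookeeper-3.8.0-bin
ORD=${HOSTNAME##*-}
echo $((ORD + 1)) > ${ZK_HOME}/data/myid
exec ${ZK_HOME}/bin/zkServer.sh start-foreground
Bash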
[root@master ~]# kubectl get pod -n zookeeper
NAME READY STATUS RESTARTS AGE
zookeeper-cluster-0 1/1 Running 4 41m
zookeeper-cluster-1 1/1 Running 4 40m
zookeeper-cluster-2 1/1 Running 0 39m
Bash
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-0 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-1 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
[root@master ~]# kubectl exec -it -n zookeeper zookeeper-cluster-2 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/apache-zookeeper-3.8.0-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
Bash
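As a final check, data written through one member should be readable from another. A minimal sketch using the zkCli.sh shipped in the image (the /hello znode is just an example):
[root@master ~]# kubectl exec -n zookeeper zookeeper-cluster-0 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkCli.sh -server localhost:2181 create /hello world
[root@master ~]# kubectl exec -n zookeeper zookeeper-cluster-1 -- /usr/local/apache-zookeeper-3.8.0-bin/bin/zkCli.sh -server localhost:2181 get /hello
Bash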
Reference: https://blog.csdn.net/zhoujianhui008/article/details/114259416