ClickHouse Cluster Installation
Preface
A ZooKeeper cluster must be installed before setting up ClickHouse in cluster mode.
1. Cluster node information
- 1.1.1.11 node1
- 1.1.1.12 node2
- 1.1.1.13 node3
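The configuration files below refer to the nodes by hostname, so every server must be able to resolve node1/node2/node3. A minimal sketch, assuming no DNS is available and that the entries are added on each node:
# Map the cluster hostnames to the IPs listed above (run on every node)
cat >> /etc/hosts <<'EOF'
1.1.1.11 node1
1.1.1.12 node2
1.1.1.13 node3
EOF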
2. Set up the ZooKeeper cluster
Install the JDK before installing ZooKeeper.
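For example, on a CentOS/RHEL host (which the el6 rpm packages used later suggest), an OpenJDK can be installed with yum; the exact package name is an assumption and any JDK 8 works:
# Assumed package names for OpenJDK 8 on CentOS/RHEL
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
# Verify the installation
java -version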
# Download the zookeeper-3.4.12.tar.gz package and upload it to all three servers
tar -zxvf zookeeper-3.4.12.tar.gz
mv zookeeper-3.4.12 zookeeper
# Change the owner and group to the clickhouse user
chown -R clickhouse:clickhouse zookeeper/
# Configure environment variables (ZOOKEEPER_HOME must point to the extracted zookeeper directory)
export ZOOKEEPER_HOME=zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin
# In the zookeeper conf directory, copy zoo_sample.cfg to zoo.cfg
cp zoo_sample.cfg zoo.cfg
# Edit zoo.cfg:
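# tickTime: the basic time unit in milliseconds; initLimit and syncLimit are measured in ticks
# initLimit: ticks a follower may take to connect to and sync with the leader
# syncLimit: ticks a follower may lag behind the leader before it is dropped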
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/tmp/zookeeper/data/zookeeper
dataLogDir=/tmp/zookeeper/log/zookeeper
clientPort=2181
autopurge.purgeInterval=0
globalOutstandingLimit=200
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
# Note: the 1 in server.1 is the server id and must match the id in that server's myid file
mkdir -p /tmp/zookeeper/data/zookeeper /tmp/zookeeper/log/zookeeper
echo 1 > /tmp/zookeeper/data/zookeeper/myid
# echo 2 on node2, echo 3 on node3
# Start the server
zkServer.sh start
# Connect with the client
zkCli.sh -server localhost:2181
Repeat the steps above on every node.
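Once all three ZooKeeper nodes are started, a quick sanity check can be run (optional; nc is assumed to be installed):
# One node should report "Mode: leader", the other two "Mode: follower"
zkServer.sh status
# A healthy server answers the ruok four-letter-word with "imok"
echo ruok | nc node1 2181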
3. Set up the ClickHouse cluster
# Download the ClickHouse rpm packages and upload them to all three servers
# clickhouse-client-19.9.5.36-1.el6.x86_64.rpm
# clickhouse-common-static-19.9.5.36-1.el6.x86_64.rpm
# clickhouse-server-19.9.5.36-1.el6.x86_64.rpm
rpm -ivh *.rpm
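Optionally, confirm that the packages installed and that the rpm created the clickhouse system user, which the later chown steps assume exists:
# List the installed ClickHouse packages and check the clickhouse user
rpm -qa | grep clickhouse
id clickhouse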
# Edit the configuration; in cluster mode every node needs three files: config.xml, metrika.xml, users.xml
cd /etc/clickhouse-server
<!-- config.xml -->
<yandex>
<!-- Logging -->
<logger>
<level>trace</level>
<log>/data/clickhouse/log/server.log</log>
<errorlog>/data/clickhouse/log/error.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<!-- Ports -->
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<interserver_http_port>9009</interserver_http_port>
<!-- Listen address -->
<listen_host>0.0.0.0</listen_host>
<!-- Max connections -->
<max_connections>64</max_connections>
<!-- HTTP keep-alive timeout, in seconds -->
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Max concurrent queries -->
<max_concurrent_queries>16</max_concurrent_queries>
<!-- Cache sizes, in bytes -->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<mark_cache_size>10737418240</mark_cache_size>
<!-- Storage paths -->
<path>/data1/clickhouse/</path>
<tmp_path>/data1/clickhouse/tmp/</tmp_path>
<!-- Users config -->
<users_config>users.xml</users_config>
<default_profile>default</default_profile>
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
<cleanup_delay_period>60</cleanup_delay_period>
<task_max_lifetime>86400</task_max_lifetime>
<max_tasks_in_queue>1000</max_tasks_in_queue>
</distributed_ddl>
<default_database>default</default_database>
<remote_servers incl="clickhouse_remote_servers" />
<zookeeper incl="zookeeper-servers" optional="true" />
<macros incl="macros" optional="true" />
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Guard against dropping large tables; 0 disables the limit -->
<max_table_size_to_drop>0</max_table_size_to_drop>
<include_from>/etc/clickhouse-server/metrika.xml</include_from>
</yandex>
<!-- metrika.xml -->
<yandex>
<!-- ClickHouse cluster nodes -->
<clickhouse_remote_servers>
<ck_cluster>
<!-- Shard 1 -->
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node1</host>
<port>9000</port>
<user>default</user>
<password></password>
</replica>
<replica>
<host>node2</host>
<port>9000</port>
<user>default</user>
<password></password>
</replica>
<replica>
<host>node3</host>
<port>9000</port>
<user>default</user>
<password></password>
</replica>
</shard>
<!-- Shard 2: add further <shard> blocks here as needed -->
</ck_cluster>
</clickhouse_remote_servers>
<!-- ZooKeeper settings -->
<zookeeper-servers>
<node index="1">
<host>node1</host>
<port>2181</port>
</node>
<node index="2">
<host>node2</host>
<port>2181</port>
</node>
<node index="3">
<host>node3</host>
<port>2181</port>
</node>
</zookeeper-servers>
<!-- Macros; used to expand the ZooKeeper paths when creating replicated tables -->
<macros>
<!-- shard_name: shard identifier; all replicas of the same shard must use the same value, otherwise replicated tables will not work -->
<shard_name>01</shard_name>
<!-- replica: replica name, unique per server (here the server's own hostname) -->
<replica>node1</replica>
<!-- Cluster name -->
<cluster>ck_cluster</cluster>
</macros>
<networks>
<ip>::/0</ip>
</networks>
<!-- Compression settings -->
<clickhouse_compression>
<case>
<min_part_size>10000000000</min_part_size>
<min_part_size_ratio>0.01</min_part_size_ratio>
<method>lz4</method> <!-- lz4 compresses and decompresses faster than zstd but uses more disk -->
</case>
</clickhouse_compression>
</yandex>
<!-- users.xml -->
<yandex>
<profiles>
<default>
<max_memory_usage>10000000000</max_memory_usage>
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>random</load_balancing>
</default>
</profiles>
<quotas>
<!-- Name of quota. -->
<default>
<interval>
<duration>3600</duration>
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
<users>
<default>
<password></password>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
</default>
</users>
</yandex>
# Create the data/log directories referenced in config.xml and give the clickhouse user ownership
mkdir -p /data/clickhouse/log /data1/clickhouse/tmp
chown -R clickhouse:clickhouse /data /data1
Start the cluster
# Start the server as the clickhouse user (alternatively via the init script: sudo /etc/init.d/clickhouse-server start)
cd /etc/clickhouse-server/ && sudo -u clickhouse clickhouse-server --config-file=config.xml &
# Connect with the client
clickhouse-client
Repeat the steps above on every node.
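With all nodes running, the cluster definition and the macros from metrika.xml can be exercised from any node. The table and column names below are hypothetical; they only illustrate how {shard_name}, {replica} and the ck_cluster name are used:
# The cluster defined in metrika.xml should list three replicas
clickhouse-client --query "SELECT cluster, shard_num, replica_num, host_name FROM system.clusters WHERE cluster = 'ck_cluster'"
# Confirm the server can reach ZooKeeper
clickhouse-client --query "SELECT name FROM system.zookeeper WHERE path = '/'"
# Hypothetical replicated table; {shard_name} and {replica} expand from the <macros> section
clickhouse-client --multiquery <<'SQL'
CREATE TABLE default.events ON CLUSTER ck_cluster
(
    event_date Date,
    id UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard_name}/events', '{replica}')
PARTITION BY toYYYYMM(event_date)
ORDER BY id;

CREATE TABLE default.events_all ON CLUSTER ck_cluster AS default.events
ENGINE = Distributed(ck_cluster, default, events, rand());
SQL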