172.16.120.100 ck_server_100(shard1)
172.16.120.101 ck_server_101(shard1) zk_server_101
172.16.120.102 ck_server_102(shard2) zk_server_102
172.16.120.103 ck_server_103(shard2) zk_server_103
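The hostnames above (ck_server_10x / zk_server_10x) are referenced later in zoo.cfg, so they must resolve on every node. A minimal /etc/hosts sketch, assuming no internal DNS is available (run on all four machines):
cat >> /etc/hosts <<'EOF'
172.16.120.100 ck_server_100
172.16.120.101 ck_server_101 zk_server_101
172.16.120.102 ck_server_102 zk_server_102
172.16.120.103 ck_server_103 zk_server_103
EOF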
1. Install the JDK (run on 101/102/103)
[root@node101 /usr/local/src]# tar xf jdk-8u171-linux-x64.tar.gz -C /usr/local/
[root@node101 /usr/local/src]# ln -s /usr/local/jdk1.8.0_171/ /usr/local/jdk1.8
[root@node101 /usr/local/src]# vim /etc/profile.d/jdk.sh # add the following content
export JAVA_HOME=/usr/local/jdk1.8
export JRE_HOME=/usr/local/jdk1.8
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
[root@node101 /usr/local/src]# source /etc/profile
[root@node101 /usr/local/src]# java -version
java version "1.8.0_171"
Java(TM) SE Runtime Environment (build 1.8.0_171-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.171-b11, mixed mode)
2. Install ZooKeeper (run on 101/102/103)
[root@node101 /usr/local/src]# tar xf zookeeper-3.4.12.tar.gz -C /usr/local/
[root@node101 /usr/local/src]# ln -s /usr/local/zookeeper-3.4.12/ /usr/local/zookeeper
[root@node101 /usr/local/src]# mv /usr/local/zookeeper/conf/zoo_sample.cfg /usr/local/zookeeper/conf/zoo.cfg
[root@node101 /usr/local/src]# vim /usr/local/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper
dataLogDir=/data/zookeeper
clientPort=2181
autopurge.purgeInterval=0
globalOutstandingLimit=200
server.1=zk_server_101:2888:3888
server.2=zk_server_102:2888:3888
server.3=zk_server_103:2888:3888
[root@node101 /usr/local/src]# vim /usr/local/zookeeper/conf/zookeeper-env.sh
#!/bin/bash
ZOO_LOG_DIR=/data/zookeeper # directory for log files
ZOO_LOG4J_PROP="INFO,ROLLINGFILE" # enable log rotation
JVMFLAGS="-d64 -Xmx512m -Xms512m -Xmn128m -Xloggc:${ZOO_LOG_DIR}/gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${ZOO_LOG_DIR}/oom.log"
[root@node101 /usr/local/src]# mkdir /data/
[root@node101 /usr/local/src]# mkdir /data/zookeeper
[root@node101 /usr/local/src]# vim /data/zookeeper/myid # 1 on 101, 2 on 102, 3 on 103
1
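Equivalently, the myid files can be written in one line on each host (values as noted in the comment above):
echo 1 > /data/zookeeper/myid   # on 172.16.120.101
echo 2 > /data/zookeeper/myid   # on 172.16.120.102
echo 3 > /data/zookeeper/myid   # on 172.16.120.103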
[root@node101 /usr/local/src]# chown keung.keung /data/zookeeper -R
[root@node101 /usr/local/src]# su - keung -c "/usr/local/zookeeper/bin/zkServer.sh start"
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@node101 /usr/local/src]# su - keung -c "/usr/local/zookeeper/bin/zkServer.sh status"
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower
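With all three ZooKeeper nodes started, exactly one should report Mode: leader and the other two Mode: follower. A quick sketch that checks every member via the stat four-letter command (assumes nc is installed):
for h in zk_server_101 zk_server_102 zk_server_103; do
    echo -n "$h: "
    echo stat | nc "$h" 2181 | grep Mode
done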
3. Install ClickHouse (run on 100/101/102/103)
(1). Install the yum repository
[root@node101 /usr/local/src]# curl -s https://packagecloud.io/install/repositories/altinity/clickhouse/script.rpm.sh | bash
The repository is setup! You can now install packages.
(2). Install ClickHouse
[root@node101 /usr/local/src]# yum -y install clickhouse-server clickhouse-client clickhouse-server-common clickhouse-compressor
[root@node101 /usr/local/src]# mkdir /data/clickhouse -p
(3). Configure config.xml
[root@node101 /usr/local/src]# vim /data/clickhouse/config.xml
<?xml version="1.0"?>
<yandex>
<!-- Logging -->
<logger>
<level>trace</level>
<log>/data/clickhouse/logs/clickhouse.log</log>
<errorlog>/data/clickhouse/error.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<!-- Local node settings -->
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<interserver_http_port>9009</interserver_http_port>
<interserver_http_port>9009</interserver_http_port>
<interserver_http_host>ck_server_101</interserver_http_host> <!-- this node's hostname -->
<!-- Local configuration -->
<listen_host>0.0.0.0</listen_host>
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<max_concurrent_queries>100</max_concurrent_queries>
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<mark_cache_size>5368709120</mark_cache_size>
<path>/data/clickhouse/</path>
<tmp_path>/data/clickhouse/tmp/</tmp_path>
<users_config>users.xml</users_config>
<default_profile>default</default_profile>
<log_queries>1</log_queries>
<default_database>default</default_database>
<!-- Cluster configuration -->
<remote_servers incl="clickhouse_remote_servers" />
<zookeeper incl="zookeeper-servers" optional="true" />
<macros incl="macros" optional="true" />
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<max_table_size_to_drop>0</max_table_size_to_drop>
<include_from>/data/clickhouse/metrika.xml</include_from>
</yandex>
(4). Configure metrika.xml. Note that if a password is set, the username and plaintext password must also appear in the cluster configuration.
[root@node101 /usr/local/src]# vim /data/clickhouse/metrika.xml
<yandex>
<!-- ClickHouse cluster nodes -->
<clickhouse_remote_servers>
<test_ck_cluster>
<!-- Shard 1 -->
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>172.16.120.100</host>
<port>9000</port>
<user>default</user>
<password>UHXQQmhb</password>
</replica>
<replica>
<host>172.16.120.101</host>
<port>9000</port>
<user>default</user>
<password>UHXQQmhb</password>
</replica>
</shard>
<!-- Shard 2 -->
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>172.16.120.102</host>
<port>9000</port>
<user>default</user>
<password>UHXQQmhb</password>
</replica>
<replica>
<host>172.16.120.103</host>
<port>9000</port>
<user>default</user>
<password>UHXQQmhb</password>
</replica>
</shard>
<!-- Shard 3 -->
</test_ck_cluster>
</clickhouse_remote_servers>
<!-- ZooKeeper configuration -->
<zookeeper-servers>
<node index="1">
<host>172.16.120.101</host>
<port>2181</port>
</node>
<node index="2">
<host>172.16.120.102</host>
<port>2181</port>
</node>
<node index="3">
<host>172.16.120.103</host>
<port>2181</port>
</node>
</zookeeper-servers>
<macros>
<replica>172.16.120.101</replica> <!-- IP of the current node -->
</macros>
<networks>
<ip>::/0</ip>
</networks>
<!-- Compression settings -->
<clickhouse_compression>
<case>
<min_part_size>10000000000</min_part_size>
<min_part_size_ratio>0.01</min_part_size_ratio>
<method>lz4</method> <!-- lz4 compresses faster than zstd but achieves a lower ratio, so it uses more disk -->
</case>
</clickhouse_compression>
</yandex>
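Remember that the <macros> block must differ on every node: each server writes its own IP as the replica value. Before starting the servers it is also worth checking that both XML files are well-formed; a small sketch, assuming xmllint (libxml2) is installed:
xmllint --noout /data/clickhouse/config.xml  && echo "config.xml OK"
xmllint --noout /data/clickhouse/metrika.xml && echo "metrika.xml OK"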
(5). Configure users.xml
[root@node101 /usr/local/src]# vim /data/clickhouse/users.xml
<?xml version="1.0"?>
<yandex>
<profiles>
<default>
<max_memory_usage>10000000000</max_memory_usage>
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>random</load_balancing>
</default>
<readonly>
<max_memory_usage>10000000000</max_memory_usage>
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>random</load_balancing>
<readonly>1</readonly>
</readonly>
</profiles>
<quotas>
<!-- Name of quota. -->
<default>
<interval>
<duration>3600</duration>
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
<users>
<default>
<!-- PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' -->
<!-- password UHXQQmhb -->
<password_sha256_hex>858bef20bf7369697e572e31ee41a23b4524973289f3988f29f3e4d4be540487</password_sha256_hex>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
</default>
<ck>
<password_sha256_hex>858bef20bf7369697e572e31ee41a23b4524973289f3988f29f3e4d4be540487</password_sha256_hex>
<networks>
<ip>::/0</ip>
</networks>
<profile>readonly</profile>
<quota>default</quota>
</ck>
</users>
</yandex>
(6). Modify the init script
[root@node101 /usr/local/src]# vim /etc/init.d/clickhouse-server
CLICKHOUSE_LOGDIR=/data/clickhouse/logs
LOCALSTATEDIR=/data/clickhouse
CLICKHOUSE_CONFIG=/data/clickhouse/config.xml
LOCKFILE=/data/clickhouse/clickhouse.lock
CLICKHOUSE_PIDDIR=/data/clickhouse
CLICKHOUSE_PIDFILE="/data/clickhouse/clickhouse.pid"
(7). Fix permissions
[root@node101 /usr/local/src]# mkdir /data/clickhouse/logs
[root@node101 /usr/local/src]# chown clickhouse.clickhouse /data/clickhouse/ -R
(8). Start ClickHouse
[root@node101 /usr/local/src]# /etc/init.d/clickhouse-server start
Start clickhouse-server service: Path to data directory in /data/clickhouse/config.xml: /data/clickhouse/
DONE
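Once all four nodes are up, a few checks confirm that the cluster definition and the ZooKeeper connection are working; a rough sketch (the password is the plaintext value from users.xml):
# is the native TCP port listening?
ss -lntp | grep 9000
# does the server see both shards and all four replicas?
clickhouse-client --password UHXQQmhb -q "SELECT cluster, shard_num, replica_num, host_address FROM system.clusters WHERE cluster = 'test_ck_cluster'"
# can the server reach ZooKeeper?
clickhouse-client --password UHXQQmhb -q "SELECT name FROM system.zookeeper WHERE path = '/'"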
Hi, I followed your ClickHouse cluster installation guide and set up ClickHouse on four virtual machines, but when I start clickhouse-client -m --password ****, I get the error: Code: 210. DB::NetException: Connection refused (localhost:9000, ::1).
I checked the error log and found: Application: DB::Exception: Cannot lock file /data/clickhouse/status. Another server instance in same directory is already running. I don't understand what this error means. Also, the status file contains the PID of a running clickhouse-server process.
Could you tell me where my configuration is wrong and why the connection is refused? I followed the configuration you provided.
It probably means the port never came up. I'd suggest checking the logs to see what the detailed error is.
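For example, a rough check (paths follow the config in the post):
ss -lntp | grep -E '8123|9000'           # is clickhouse-server actually listening?
tail -n 50 /data/clickhouse/error.log    # latest startup errors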
Hi, I checked error.log. The error is: Application: DB::Exception: Cannot lock file /data/clickhouse/status. Another server instance in same directory is already running.
/etc/init.d/clickhouse-server start itself succeeds, but clickhouse-client -m --password **** still fails with: Code: 210. DB::NetException: Connection refused (localhost:9000, ::1).
The problem above is solved, but clickhouse-client -m --password **** still reports: Code: 210. DB::NetException: Connection refused (localhost:9000, ::1). The error log shows: void DB::DDLWorker::run(): Code: 999, e.displayText() = Coordination::Exception: All connection tries failed while connecting to ZooKeeper. Addresses: 192.xxx.xx.xxx:2181, 192.xxx.xx.xxx:2181, 192.xxx.xx.xxx:2181, i.e. it cannot connect to ZooKeeper.
Then I looked at cat zookeeper.out. The zookeeper.out on host hdp02 looks suspicious: 2019-01-07 15:43:49,623 [myid:2] - INFO [ProcessThread(sid:2 cport:-1)::PrepRequestProcessor@645] - Got user-level KeeperException when processing sessionid:0x168273a00510000 type:create cxid:0x1 zxid:0x250000000f txntype:-1 reqpath:n/a Error Path:/clickhouse Error:KeeperErrorCode = NodeExists for /clickhouse. I don't understand why the connection to ZooKeeper on port 2181 fails. Could you help me analyze it?
Check the firewall to see whether the port is reachable, and make sure SELinux has been turned off.
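For example, on CentOS 7 something along these lines (a sketch only, adjust hosts and ports to your setup):
systemctl stop firewalld && systemctl disable firewalld   # or open 2181/9000/8123/9009 explicitly
getenforce                                                # "Enforcing" means SELinux is still on
setenforce 0                                              # disable temporarily; edit /etc/selinux/config to persist
nc -zv 172.16.120.101 2181                                # is the ZooKeeper port reachable from the ClickHouse node?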
Is the Code: 210. DB::NetException: Connection refused (localhost:9000, ::1) error caused by adding 0.0.0.0? How can I fix it?
Try this for reference: https://github.com/yandex/ClickHouse/issues/2299
I'd also suggest looking at the actual log for details.
Hi, I have everything installed following your steps, but how do I use it? How do I test high availability?
ClickHouse stores data split into one or more shards, and each shard usually contains two or more replicas. You can stop one of the replicas and check whether the cluster still works.
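A rough sketch of such a test through clickhouse-client (table and database names are made up, and it assumes a <shard> macro has been added next to <replica> in metrika.xml; run the CREATE statements on every node, or via ON CLUSTER if distributed DDL is configured):
# run on every node
clickhouse-client --password UHXQQmhb --multiquery <<'SQL'
CREATE TABLE default.ha_test
(
    id UInt64,
    ts DateTime
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/ha_test', '{replica}')
ORDER BY id;

CREATE TABLE default.ha_test_all AS default.ha_test
ENGINE = Distributed(test_ck_cluster, default, ha_test, rand());
SQL

# insert through the Distributed table, stop clickhouse-server on one replica, then query again
clickhouse-client --password UHXQQmhb -q "INSERT INTO default.ha_test_all VALUES (1, now()), (2, now())"
clickhouse-client --password UHXQQmhb -q "SELECT count() FROM default.ha_test_all"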
Hi, I did set the permissions with chown clickhouse.clickhouse /data/clickhouse/ -R, but when I run sudo -u clickhouse clickhouse-server --config-file=/etc/clickhouse-server/config.xml it still reports: Couldn't save preprocessed config to /data/clickhouse/clickhousedata//preprocessed_configs/config.xml: Access to file denied: /data/clickhouse
Poco::Exception. Code: 1000, e.code() = 13, e.displayText() = Access to file denied: /data/clickhouse. What could be the cause?
Check whether the /data/clickhouse/clickhousedata//preprocessed_configs/ directory exists; you may need to create it beforehand.
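For example (path taken from your error message):
mkdir -p /data/clickhouse/clickhousedata/preprocessed_configs
chown -R clickhouse.clickhouse /data/clickhouse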