# View all partitions
SELECT
database,
table,
partition,
name,
active
FROM system.parts
WHERE table = 'table_name'
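# A minimal sketch aggregating the same table per partition ('table_name' is a placeholder):
SELECT partition, sum(rows) AS rows, formatReadableSize(sum(bytes_on_disk)) AS size_on_disk
FROM system.parts
WHERE table = 'table_name' AND active
GROUP BY partition
ORDER BY partition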
# ClickHouse drop-partition command (the argument is the partition name):
alter table sip.ngfw_access_tuple_all_20y DROP PARTITION '2020-05-01';
# ClickHouse: count the current day's rows ('{}' is a placeholder filled in by the calling code):
SELECT count() FROM log.netflow WHERE toDate(record_time) = '{}';
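# e.g. with a concrete (illustrative) date substituted for the '{}' placeholder:
SELECT count() FROM log.netflow WHERE toDate(record_time) = '2020-06-17';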
# View table size, compression ratio, etc.
select
sum(rows) as row, --total rows
formatReadableSize(sum(data_uncompressed_bytes)) as ysq, --uncompressed size
formatReadableSize(sum(data_compressed_bytes)) as ysh, --compressed size
round(sum(data_compressed_bytes) / sum(data_uncompressed_bytes) * 100, 0) ys_rate --compression ratio (%)
from system.parts
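# In practice this is usually restricted to the active parts of one table (a sketch; db/table names are placeholders):
select
sum(rows) as row,
formatReadableSize(sum(data_uncompressed_bytes)) as ysq,
formatReadableSize(sum(data_compressed_bytes)) as ysh,
round(sum(data_compressed_bytes) / sum(data_uncompressed_bytes) * 100, 0) ys_rate
from system.parts
where active and database = 'db_name' and table = 'table_name'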
# Per-table metrics (in bytes): size, row count, date range, on-disk size, uncompressed and compressed size
select database,
table,
sum(bytes) as size,
sum(rows) as rows,
min(min_date) as min_date,
max(max_date) as max_date,
sum(bytes_on_disk) as bytes_on_disk,
sum(data_uncompressed_bytes) as data_uncompressed_bytes,
sum(data_compressed_bytes) as data_compressed_bytes,
(data_compressed_bytes / data_uncompressed_bytes) * 100 as compress_rate,
max_date - min_date as days,
size / (max_date - min_date) as avgDaySize
from system.parts
where active
and database = 'db_name'
and table = 'table_name'
group by database, table
# Per-table metrics (human-readable units): size, row count, date range, on-disk size, uncompressed and compressed size
select
database,
table,
formatReadableSize(size) as size,
formatReadableSize(bytes_on_disk) as bytes_on_disk,
formatReadableSize(data_uncompressed_bytes) as data_uncompressed_bytes,
formatReadableSize(data_compressed_bytes) as data_compressed_bytes,
compress_rate,
rows,
days,
formatReadableSize(avgDaySize) as avgDaySize
from
(
select
database,
table,
sum(bytes) as size,
sum(rows) as rows,
min(min_date) as min_date,
max(max_date) as max_date,
sum(bytes_on_disk) as bytes_on_disk,
sum(data_uncompressed_bytes) as data_uncompressed_bytes,
sum(data_compressed_bytes) as data_compressed_bytes,
(data_compressed_bytes / data_uncompressed_bytes) * 100 as compress_rate,
max_date - min_date as days,
size / (max_date - min_date) as avgDaySize
from system.parts
where active
and database = 'db_name'
and table = 'tb_name'
group by
database,
table
)
# Per-column data sizes within a table:
SELECT column,
any(type),
sum(column_data_compressed_bytes) AS compressed,
sum(column_data_uncompressed_bytes) AS uncompressed,
sum(rows)
FROM system.parts_columns
WHERE database = 'db_name'
and table = 'table_name'
AND active
GROUP BY column
ORDER BY column ASC
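# The same system table sorted by compressed size surfaces the heaviest columns first (a sketch):
SELECT column, any(type), sum(column_data_compressed_bytes) AS compressed
FROM system.parts_columns
WHERE database = 'db_name' and table = 'table_name' AND active
GROUP BY column
ORDER BY compressed DESC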
# Python module location
/usr/lib64/python2.7/site-packages/clickhouse
# Drop a table
DROP TABLE db.tb;
# Create the full-traffic metadata table
CREATE TABLE IF NOT EXISTS slb.netflow_25E_io_test
(src_ip IPv6, src_port UInt16, dst_ip IPv6, dst_port UInt16, app_crc UInt32,
request_flow Int64, response_flow Int64, record_time DateTime)
ENGINE = MergeTree() PARTITION BY toDate(record_time) ORDER BY record_time
SETTINGS index_granularity = 8192;
# Batch SQL execution: inserting from a file
# cat streams the file as the INSERT data input
cat /data/test_fetch.tsv | clickhouse-client --query "INSERT INTO test_fetch FORMAT TSV"
# Redirect query output to a file
clickhouse-client --query="SELECT * FROM test_fetch" > /data/test_fetch.tsv
# Multiple SQL statements, separated by semicolons, executed and printed in order
clickhouse-client -h 127.0.0.1 --multiquery --query="SELECT 1;SELECT 2;SELECT 3;"
--host -h        server address
--port           port
--user -u        user name
--password       password
--database -d    default database
--query          query to execute
--multiquery -n  allow multiple semicolon-separated statements
--time -t        print each statement's execution time
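# Putting the flags together (a sketch; host and credentials are placeholders):
clickhouse-client -h 127.0.0.1 --port 9000 -u default --password '' -d default -n -t --query="SELECT 1;SELECT 2;"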
# Create a database
CREATE DATABASE IF NOT EXISTS db_name [ENGINE = engine]
# The five database engines supported
Ordinary    the default
Dictionary  dictionary engine
Memory      in-memory engine for temporary data; tables in this database live only in memory, never touch disk, and disappear on restart
Lazy        log engine; only Log-family table engines can be used in this database
MySQL       MySQL engine; the database automatically pulls data from a remote MySQL server and creates MySQL-engine tables for it
CREATE DATABASE DB_TEST;
A database is essentially a directory on disk; after the statement runs, ClickHouse creates a DB_TEST directory under its installation data path:
#pwd
/chbase/data
#ls
DB_TEST default system
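# For the MySQL engine described above, a hedged example (host and credentials are placeholders):
CREATE DATABASE mysql_db ENGINE = MySQL('host:3306', 'db_name', 'user', 'password');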
# Drop a database
DROP DATABASE [IF EXISTS] db_name;
# Create a table
CREATE TABLE [IF NOT EXISTS] [db_name.]table_name (
name1 [type] [DEFAULT | MATERIALIZED | ALIAS expr],
name2....
.....
) ENGINE = engine;
# Copy another table's structure
CREATE TABLE [IF NOT EXISTS] [db_name.]new_tb AS [db_name2.]old_tb [ENGINE = engine]
#eg:
create table if not exists new_tb as default.hits_v1 engine = TinyLog;
# Create a table from a SELECT, copying its data too
CREATE TABLE IF NOT EXISTS [db_name.]new_tb ENGINE = engine AS SELECT .....
#eg:
create table if not exists new_tb engine=Memory as select * from default.hits_v1
# Drop a table
DROP TABLE [IF EXISTS] [db_name.]tb_name;
# Filter on the partition key to speed up queries
SELECT * FROM partition_name WHERE record_time = '2020-06-17';
# Drop a column
ALTER TABLE tb_name DROP COLUMN [IF EXISTS] name
alter table test_v1 drop column URL;
# Move/rename tables - works like the Linux mv command
RENAME TABLE [db_name11.]tb_name11 TO [db_name12.]tb_name12, [db_name21.]tb_name21 TO [db_name22.]tb_name22,.....
#eg:
rename table default.test_v1 to db_test.test_v2;
# Truncate a table
TRUNCATE TABLE [IF EXISTS] [db_name.]tb_name
#eg:
truncate table db_test.test_v2
# Query partition info
SELECT partition_id,name,table,database FROM system.parts where table = 'partition_name';
# Drop a partition
ALTER TABLE tb_name DROP PARTITION partition_expr
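#eg: dropping the whole June 2020 partition (illustrative; the value must match the table's PARTITION BY expression):
alter table partition_v2 drop partition '202006';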
# Detach a partition (DETACH statement)
ALTER TABLE tb_name DETACH PARTITION partition_expr;
#eg: the following detaches all data for June 2020
alter table tb_name detach partition '202006';
# Detached data is moved under:
#pwd
/chbase/data/data/default/partition_v2/detached
Once a partition is moved into the detached subdirectory, it is no longer managed by ClickHouse; ClickHouse never cleans these files up on its own, so they must be deleted manually, unless the partition is re-attached.
# Re-attach a partition
ALTER TABLE partition_v2 ATTACH PARTITION '202006';
# Distributed DDL execution: just append ON CLUSTER cluster_name.
This turns an ordinary DDL statement into a distributed one; the following broadcasts the DDL to every node of the ch_cluster cluster:
CREATE TABLE partition_v3 ON CLUSTER ch_cluster(
ID String,
URL String,
EventTime Date
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventTime)
ORDER BY ID
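# The ON CLUSTER suffix works for other DDL as well, e.g. a cluster-wide drop (a sketch against the same ch_cluster):
DROP TABLE partition_v3 ON CLUSTER ch_cluster;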
# Writing data: the INSERT statement, three forms
1. Regular VALUES form; further rows follow, separated by commas
INSERT INTO [db.]table [(c1,c2,c3...)] values (v11,v12,v13...),(v21,v22,v23...),.....
Expressions and functions are also supported:
insert into partition_v2 values ('a0014',toString(1+2),now());
2. Using a specified data format
INSERT INTO [db.]table [(c1,c2,c3...)] FORMAT format_name data_set;
#eg with CSV format:
INSERT INTO partition_v2 FORMAT CSV \
'A0017','url1','2020-06-01'\
'A0018','url2','2020-06-03'
3. Using a SELECT clause
INSERT INTO [db.]table [(c1,c2,c3...)] SELECT * FROM partition_v1
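#eg (illustrative, assuming partition_v1 and partition_v2 carry the ID, URL, EventTime columns used above):
insert into partition_v2 (ID, URL, EventTime) select ID, URL, EventTime from partition_v1 where toYYYYMM(EventTime) = 202006;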