-- Show the partitions of a table
SHOW PARTITIONS FROM stg_pay_dt;
-- Drop a partition (removes the partition and all data in it)
ALTER TABLE stg_pay_dt DROP PARTITION p20211011;
-- Delete rows matching a condition inside one partition
DELETE FROM user.dailyitemsold PARTITION p20210611 WHERE clt_date="2021-06-11";
-- Add a partition (idempotent thanks to IF NOT EXISTS)
ALTER TABLE stg_pay_dt ADD PARTITION IF NOT EXISTS p20211011 VALUES LESS THAN('20211012');

-- Drop the staging table if it already exists
DROP TABLE IF EXISTS `stg_kafka_device_message_data`;

-- Create the staging table for raw Kafka device messages.
-- AGGREGATE KEY on (dt, message): receive_time uses REPLACE, so on duplicate
-- keys the newest receive_time wins.
CREATE TABLE stg_kafka_device_message_data (
    dt INT COMMENT '分區時間(采用的數據時間而非消費數據的時間)',
    message VARCHAR(4000) COMMENT '原始數據',
    receive_time DATETIME REPLACE COMMENT '接收時間'
)
AGGREGATE KEY ( dt, message )
PARTITION BY RANGE ( dt ) (
    PARTITION p20211229 VALUES LESS THAN ( '20211230' )
)
DISTRIBUTED BY HASH ( dt ) BUCKETS 10
PROPERTIES ( "replication_num" = "3" );

-- Create a routine load job consuming from Kafka.
-- kafka_default_offsets may be OFFSET_BEGINNING or OFFSET_END.
-- Routine load job: continuously consume topic device.message and load it
-- into stg_kafka_device_message_data, deriving columns from the JSON payload.
CREATE ROUTINE LOAD load_kafka_device_message_data ON stg_kafka_device_message_data COLUMNS
-- dt: millisecond event timestamp from the message -> yyyyMMdd partition key
( dt=from_unixtime( CAST( get_json_string ( message, '$.timestamp' ) AS BIGINT ) / 1000, '%Y%m%d' ),
message,
-- receive_time: wall-clock time at load, not the event time
receive_time=DATE_FORMAT(now(),'%Y-%m-%d %H:%i:%s') )
PROPERTIES ( "desired_concurrent_number" = "1", "max_error_number" = "1000" )
FROM KAFKA ( "kafka_broker_list" = "172.17.46.202:9092",
"kafka_topic" = "device.message",
"property.group.id" = "kafka-consumer2-data-import",
"property.kafka_default_offsets" = "OFFSET_BEGINNING" );
-- List routine load jobs
SHOW ROUTINE LOAD;
-- Show the CREATE statement of a routine load job
SHOW CREATE ROUTINE LOAD FOR stg_kafka_device_message_data;
-- Pause a Kafka routine load job
PAUSE ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Resume a paused routine load job
RESUME ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Stop (permanently remove) a routine load job
STOP ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Show a table's CREATE statement
SHOW CREATE TABLE ods_fireHydrant_dt;
-- Modify a column's type/aggregation/comment
ALTER TABLE ods_fireHydrant_dt MODIFY COLUMN address VARCHAR(400) REPLACE COMMENT "設備地址";
-- Create a dynamically partitioned table. time_unit may be HOUR / DAY / WEEK / MONTH.
-- (With HOUR, generated partition names get a yyyyMMddHH suffix, e.g. 2020032501;
--  an hour-grained partition column must be DATETIME, not DATE.)
CREATE TABLE tb1 ( k1 DATE ) PARTITION BY RANGE(k1) () DISTRIBUTED BY HASH(k1)
PROPERTIES ( "replication_num" = "3", "dynamic_partition.enable" = "true", "dynamic_partition.time_unit" = "DAY", "dynamic_partition.end" = "1", "dynamic_partition.prefix" = "p", "dynamic_partition.buckets" = "8" );
-- Dynamic-partition properties can be changed later; e.g. toggle the feature via ALTER TABLE:
ALTER TABLE site_access SET("dynamic_partition.enable"="false");
ALTER TABLE site_access SET("dynamic_partition.enable"="true");
-- Scheduling interval of the dynamic-partition thread; default 600 s (10 minutes)
ADMIN SET FRONTEND CONFIG ("dynamic_partition_check_interval_seconds" = "3600");
-- Rename a table
ALTER TABLE tb1 RENAME tb2;
-- Template: add a nullable column defaulting to NULL
ALTER TABLE table_name ADD COLUMN column_name column_type DEFAULT NULL;
-- Template: drop a column
ALTER TABLE table_name DROP COLUMN column_name;
-- Atomically swap two tables (their schemas must match)
ALTER TABLE raw_mongo_fi_iot_hld_history_record REPLACE WITH TABLE stg_kafka_mongo_history_data PROPERTIES('swap' = 'true');
-- Empty a table
TRUNCATE TABLE stg_strem_mango_history_ws;
-- Change the default replica count.
/* The maximum replica count is bounded by the number of distinct IPs in the
   cluster (not the number of BEs): Doris never places two replicas of the same
   tablet on one physical machine, and machines are identified by IP. So even
   with 3+ BE instances sharing one IP, replication_num can only be 1. */
ALTER TABLE tableName SET ("default.replication_num" = "3");
-- List the cluster's BE nodes
SHOW BACKENDS;
-- Show a table's column details
DESC TABLE_NAME;
-- Example: Doris routine load consuming Kafka JSON messages.
-- kafka_default_offsets may be OFFSET_BEGINNING or OFFSET_END.
-- NOTE(review): the job name equals the target table name here; a distinct
-- job name (as in the earlier example) would be clearer.
CREATE ROUTINE LOAD stg_kafka_device_message_data
ON stg_kafka_device_message_data
COLUMNS (
-- dt: millisecond event timestamp -> yyyyMMdd partition key
dt = from_unixtime( CAST( get_json_string ( message, '$.timestamp' ) AS BIGINT ) / 1000, '%Y%m%d' )
,message_id = get_json_string ( message, '$.messageId' )
,message_type = get_json_string ( message, '$.messageType' )
,product_id = get_json_string ( message, '$.headers.productId' )
,device_id = get_json_string ( message, '$.deviceId' )
,device_name = get_json_string ( message, '$.headers.deviceName' )
-- collect_time: event time; receive_time: wall-clock time at load
,collect_time = from_unixtime( CAST( get_json_string ( message, '$.timestamp' ) AS BIGINT ) / 1000, '%Y-%m-%d %H:%i:%s' )
,receive_time = DATE_FORMAT(now(),'%Y-%m-%d %H:%i:%s')
,headers = get_json_string ( message, '$.headers' )
,properties = get_json_string ( message, '$.properties' )
,message
)
PROPERTIES ( "desired_concurrent_number" = "50", "max_error_number" = "2000", "strict_mode" = "true" )
FROM KAFKA ( "kafka_broker_list" = "172.17.46.202:9092",
"kafka_topic" = "device.message",
"property.group.id" = "stg_kafka_device_message_data",
"property.kafka_default_offsets" = "OFFSET_BEGINNING" );
-- List routine load jobs
SHOW ROUTINE LOAD;
-- Pause a Kafka routine load job
PAUSE ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Resume a paused routine load job
RESUME ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Stop (permanently remove) a Kafka routine load job
STOP ROUTINE LOAD FOR load_kafka_iot_realtime_data;
-- Generate a DROP statement for every table in a database
SELECT CONCAT('DROP TABLE IF EXISTS ', table_name, ';')
FROM information_schema.tables
WHERE table_schema = '數據庫名';
-- Show all columns of a table (including collation, privileges, comment)
SHOW FULL COLUMNS FROM table_name;
-- Show the data size of a database or table
SHOW DATA FROM stg_kafka_device_message_data;
-- [1] Create a bitmap index
CREATE INDEX idx_device_id ON stg_kafka_device_message_data (device_id) USING BITMAP COMMENT 'device_id索引';
-- [2] Show a table's indexes
SHOW INDEX FROM site_access_duplicate;
-- [3] Drop an index
DROP INDEX idx_device_id ON stg_kafka_device_message_data_back;
-- Create user 'test' with password '1234', allowed to log in from any host
CREATE USER 'test'@'%' IDENTIFIED BY '1234';
-- Drop user 'test' (localhost-only account)
DROP USER test@localhost;
-- If the user was created to allow login from any host, drop it like this
DROP USER test@'%';
-- Grant all privileges on every table of db_name to 'test' from any host
GRANT ALL ON db_name.* TO 'test'@'%';
-- Grant all privileges on the built-in information_schema database
GRANT ALL ON information_schema.* TO 'test'@'%';
-- Change the password of user 'test' to '1122'; takes effect immediately
SET PASSWORD FOR test = PASSWORD('1122');
-- Reload the privilege tables
-- NOTE(review): FLUSH PRIVILEGES is a MySQL habit; Doris privilege changes
--   take effect immediately — confirm this statement is needed/supported here.
FLUSH PRIVILEGES;
-- Apache Doris pagination syntax. An ORDER BY column is required for a
-- deterministic page. Given {pageNo: 1, pageSize: 10} from the client:
--   SELECT * FROM table ORDER BY id LIMIT pageSize OFFSET (pageNo - 1) * pageSize;
-- NOTE: OFFSET pagination scans pageNo * pageSize rows; prefer keyset/seek
--   pagination (WHERE id > :last_id ORDER BY id LIMIT n) for deep pages.
SELECT * FROM table ORDER BY ID LIMIT 10 OFFSET 0;