自定義flink的RedisSource,實現從redis中讀取數據,這里借鑒了flink-connector-redis_2.11的實現邏輯,實現對redis讀取的邏輯封裝,flink-connector-redis_2.11的使用和介紹可參考之前的博客,項目中需要引入flink-connector-redis_2.11依賴
Flink讀寫Redis(一)-寫入Redis
Flink讀寫Redis(二)-flink-redis-connector代碼學習
抽象redis數據
定義MyRedisRecord類,封裝redis數據類型和數據對象
package com.jike.flink.examples.redis;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType;
import java.io.Serializable;
/**
 * Wraps a single value read from Redis together with the {@link RedisDataType}
 * describing which Redis structure it came from, so downstream operators can
 * cast {@link #getData()} to the appropriate concrete type (e.g. a
 * {@code Map<String,String>} for {@code RedisDataType.HASH}).
 */
public class MyRedisRecord implements Serializable {

    // Explicit version id, consistent with the other Serializable classes in
    // this package (MyRedisContainer, MyRedisCommandDescription); without it
    // the JVM derives one from the class shape and deserialization breaks on
    // any recompile that changes that shape.
    private static final long serialVersionUID = 1L;

    // Raw payload read from Redis; concrete type depends on redisDataType.
    private Object data;
    private RedisDataType redisDataType;

    public MyRedisRecord(Object data, RedisDataType redisDataType) {
        this.data = data;
        this.redisDataType = redisDataType;
    }

    public Object getData() {
        return data;
    }

    public void setData(Object data) {
        this.data = data;
    }

    public RedisDataType getRedisDataType() {
        return redisDataType;
    }

    public void setRedisDataType(RedisDataType redisDataType) {
        this.redisDataType = redisDataType;
    }
}
定義Redis數據讀取類
首先定義接口類,定義redis的讀取操作,目前這里只寫了哈希表的get操作,可以增加更多的操作
package com.jike.flink.examples.redis;
import java.io.Serializable;
import java.util.Map;
/**
 * Abstraction over the Redis read operations this source needs. Extends
 * {@link Serializable} so implementations can be shipped inside a Flink
 * source function. Currently only the hash read is defined; more commands
 * can be added here as needed.
 */
public interface MyRedisCommandsContainer extends Serializable {
/**
 * Reads the whole hash stored at {@code key} and returns it as a
 * field-to-value map.
 */
Map<String,String> hget(String key);
/** Releases the underlying Redis connection resources. */
void close();
}
定義一個實現類,實現對redis的讀取操作
package com.jike.flink.examples.redis;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisSentinelPool;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * {@link MyRedisCommandsContainer} implementation backed by either a plain
 * {@link JedisPool} (direct connection) or a {@link JedisSentinelPool}
 * (sentinel mode). Exactly one of the two pools is non-null, depending on
 * which constructor was used.
 */
// NOTE(review): Cloneable is declared but clone() is never overridden — this
// looks accidental; kept only to avoid changing the class's public contract.
public class MyRedisContainer implements MyRedisCommandsContainer,Cloneable{

    private static final long serialVersionUID = 1L;
    private static final Logger LOG = LoggerFactory.getLogger(MyRedisContainer.class);

    private final JedisPool jedisPool;
    private final JedisSentinelPool jedisSentinelPool;

    /** Direct-connection variant; sentinel pool is left null. */
    public MyRedisContainer(JedisPool jedisPool) {
        Preconditions.checkNotNull(jedisPool, "Jedis Pool can not be null");
        this.jedisPool = jedisPool;
        this.jedisSentinelPool = null;
    }

    /** Sentinel variant; direct pool is left null. */
    public MyRedisContainer(JedisSentinelPool sentinelPool) {
        Preconditions.checkNotNull(sentinelPool, "Jedis Sentinel Pool can not be null");
        this.jedisPool = null;
        this.jedisSentinelPool = sentinelPool;
    }

    /**
     * Returns the entire hash stored at {@code key} as a field-to-value map.
     *
     * <p>Uses a single HGETALL round trip instead of HKEYS followed by one
     * HGET per field: one network call instead of N+1, and no race window in
     * which fields could be added or removed between the key listing and the
     * per-field reads.
     */
    @Override
    public Map<String, String> hget(String key) {
        Jedis jedis = null;
        try {
            jedis = this.getInstance();
            // Copy into a fresh HashMap so callers always get a mutable map,
            // matching the previous behavior of this method.
            return new HashMap<>(jedis.hgetAll(key));
        } catch (Exception e) {
            if (LOG.isErrorEnabled()) {
                // Pass the throwable itself so the full stack trace is logged,
                // not just the message.
                LOG.error("Cannot get Redis message with command HGET to key {}", key, e);
            }
            throw e;
        } finally {
            this.releaseInstance(jedis);
        }
    }

    /** Borrows a connection from whichever pool this container wraps. */
    private Jedis getInstance() {
        return this.jedisSentinelPool != null ? this.jedisSentinelPool.getResource() : this.jedisPool.getResource();
    }

    /** Returns a borrowed connection to its pool; failures are logged, not thrown. */
    private void releaseInstance(Jedis jedis) {
        if (jedis != null) {
            try {
                jedis.close();
            } catch (Exception e) {
                LOG.error("Failed to close (return) instance to pool", e);
            }
        }
    }

    @Override
    public void close() {
        if (this.jedisPool != null) {
            this.jedisPool.close();
        }
        if (this.jedisSentinelPool != null) {
            this.jedisSentinelPool.close();
        }
    }
}
定義redis讀取操作對象的創建者類
該類用來根據不同的配置生成不同的對象,這里考慮了直連redis和哨兵模式兩種情況,后續還可以考慮redis集群的情形
package com.jike.flink.examples.redis;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisSentinelConfig;
import org.apache.flink.util.Preconditions;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisSentinelPool;
/**
 * Factory that turns a Flink Jedis configuration into the matching
 * {@link MyRedisCommandsContainer}: a direct-pool container for
 * {@link FlinkJedisPoolConfig} and a sentinel container for
 * {@link FlinkJedisSentinelConfig}. Cluster mode could be added here later.
 */
public class MyRedisCommandsContainerBuilder {

    public MyRedisCommandsContainerBuilder() {
    }

    /**
     * Dispatches to the specific builder based on the runtime type of the
     * configuration; rejects any unknown configuration type.
     */
    public static MyRedisCommandsContainer build(FlinkJedisConfigBase flinkJedisConfigBase) {
        if (flinkJedisConfigBase instanceof FlinkJedisPoolConfig) {
            return build((FlinkJedisPoolConfig) flinkJedisConfigBase);
        }
        if (flinkJedisConfigBase instanceof FlinkJedisSentinelConfig) {
            return build((FlinkJedisSentinelConfig) flinkJedisConfigBase);
        }
        throw new IllegalArgumentException("Jedis configuration not found");
    }

    /** Builds a container backed by a direct {@link JedisPool}. */
    public static MyRedisCommandsContainer build(FlinkJedisPoolConfig jedisPoolConfig) {
        Preconditions.checkNotNull(jedisPoolConfig, "Redis pool config should not be Null");
        JedisPool pool = new JedisPool(
                toPoolConfig(jedisPoolConfig),
                jedisPoolConfig.getHost(),
                jedisPoolConfig.getPort(),
                jedisPoolConfig.getConnectionTimeout(),
                jedisPoolConfig.getPassword(),
                jedisPoolConfig.getDatabase());
        return new MyRedisContainer(pool);
    }

    /** Builds a container backed by a {@link JedisSentinelPool}. */
    public static MyRedisCommandsContainer build(FlinkJedisSentinelConfig jedisSentinelConfig) {
        Preconditions.checkNotNull(jedisSentinelConfig, "Redis sentinel config should not be Null");
        JedisSentinelPool sentinelPool = new JedisSentinelPool(
                jedisSentinelConfig.getMasterName(),
                jedisSentinelConfig.getSentinels(),
                toPoolConfig(jedisSentinelConfig),
                jedisSentinelConfig.getConnectionTimeout(),
                jedisSentinelConfig.getSoTimeout(),
                jedisSentinelConfig.getPassword(),
                jedisSentinelConfig.getDatabase());
        return new MyRedisContainer(sentinelPool);
    }

    /** Copies the shared pool-sizing settings into a commons-pool2 config. */
    private static GenericObjectPoolConfig toPoolConfig(FlinkJedisConfigBase config) {
        GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
        poolConfig.setMaxIdle(config.getMaxIdle());
        poolConfig.setMaxTotal(config.getMaxTotal());
        poolConfig.setMinIdle(config.getMinIdle());
        return poolConfig;
    }
}
redis操作描述類
package com.jike.flink.examples.redis;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType;
/**
 * Redis read commands supported by the custom source. Each command carries
 * the {@link RedisDataType} it operates on, so the source knows how to wrap
 * the result into a {@code MyRedisRecord}.
 */
public enum MyRedisCommand {

    /** Read every field of the hash stored at a given key. */
    HGET(RedisDataType.HASH);

    // final: enum constants are shared singletons, so their state must be
    // immutable.
    private final RedisDataType redisDataType;

    MyRedisCommand(RedisDataType redisDataType) {
        this.redisDataType = redisDataType;
    }

    public RedisDataType getRedisDataType() {
        return this.redisDataType;
    }
}
package com.jike.flink.examples.redis;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType;
import org.apache.flink.util.Preconditions;
import java.io.Serializable;
/**
 * Describes which Redis command the source should run and, for structures
 * that need one (hash, sorted set), the key of the structure to read.
 */
public class MyRedisCommandDescription implements Serializable {

    private static final long serialVersionUID = 1L;

    private final MyRedisCommand redisCommand;
    // Key of the hash / sorted set to read; may be null for other data types.
    private final String additionalKey;

    /**
     * @param redisCommand  the command to execute; must not be null
     * @param additionalKey key of the target structure; required when the
     *                      command operates on a HASH or SORTED_SET
     * @throws IllegalArgumentException if a hash/sorted-set command is given
     *                                  without an additional key
     */
    public MyRedisCommandDescription(MyRedisCommand redisCommand, String additionalKey) {
        Preconditions.checkNotNull(redisCommand, "Redis command type can not be null");
        // Validate BEFORE assigning any state, so a failed construction never
        // leaves a partially-initialized object behind.
        if ((redisCommand.getRedisDataType() == RedisDataType.HASH
                || redisCommand.getRedisDataType() == RedisDataType.SORTED_SET)
                && additionalKey == null) {
            throw new IllegalArgumentException("Hash and Sorted Set should have additional key");
        }
        this.redisCommand = redisCommand;
        this.additionalKey = additionalKey;
    }

    public MyRedisCommandDescription(MyRedisCommand redisCommand) {
        this(redisCommand, (String) null);
    }

    public MyRedisCommand getCommand() {
        return this.redisCommand;
    }

    public String getAdditionalKey() {
        return this.additionalKey;
    }
}
RedisSource
定義flink redis source的實現,該類構造方法接收兩個參數,包括redis配置信息以及要讀取的redis數據類型信息;open方法會在source打開時執行,用於完成redis操作類對象的創建;run方法會一直讀取redis數據,並根據數據類型調用對應的redis操作,封裝成MyRedisRecord對象,供后續處理
package com.jike.flink.examples.redis;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisConfigBase;
import org.apache.flink.util.Preconditions;
/**
 * Flink source that repeatedly polls Redis and emits each read as a
 * {@code MyRedisRecord}. The constructor takes the connection configuration
 * and a command description; {@link #open} builds the Redis container and
 * {@link #run} polls until {@link #cancel} is called.
 */
public class RedisSource extends RichSourceFunction<MyRedisRecord> {

    private static final long serialVersionUID = 1L;

    // Delay between successive polls. Without it the original loop re-read the
    // same hash in a hot loop, flooding Redis with requests.
    private static final long POLL_INTERVAL_MS = 1000L;

    private final String additionalKey;
    private final MyRedisCommand redisCommand;
    private final FlinkJedisConfigBase flinkJedisConfigBase;
    private MyRedisCommandsContainer redisCommandsContainer;
    // volatile: written by cancel() from another thread, read by run().
    private volatile boolean isRunning = true;

    public RedisSource(FlinkJedisConfigBase flinkJedisConfigBase, MyRedisCommandDescription redisCommandDescription) {
        Preconditions.checkNotNull(flinkJedisConfigBase, "Redis connection pool config should not be null");
        Preconditions.checkNotNull(redisCommandDescription, "MyRedisCommandDescription can not be null");
        this.flinkJedisConfigBase = flinkJedisConfigBase;
        this.redisCommand = redisCommandDescription.getCommand();
        this.additionalKey = redisCommandDescription.getAdditionalKey();
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        // Build the container lazily on the task manager, not in the
        // (serialized) constructor.
        this.redisCommandsContainer = MyRedisCommandsContainerBuilder.build(this.flinkJedisConfigBase);
    }

    @Override
    public void run(SourceContext<MyRedisRecord> sourceContext) throws Exception {
        while (isRunning) {
            switch (this.redisCommand) {
                case HGET:
                    sourceContext.collect(new MyRedisRecord(
                            this.redisCommandsContainer.hget(this.additionalKey),
                            this.redisCommand.getRedisDataType()));
                    break;
                default:
                    throw new IllegalArgumentException("Cannot process such data type: " + this.redisCommand);
            }
            try {
                Thread.sleep(POLL_INTERVAL_MS);
            } catch (InterruptedException e) {
                // Re-assert the interrupt flag and stop polling cleanly.
                Thread.currentThread().interrupt();
                break;
            }
        }
    }

    @Override
    public void cancel() {
        isRunning = false;
        // NOTE(review): cancel() may run concurrently with run(); closing the
        // container here can race with an in-flight hget — TODO consider
        // moving the close into RichSourceFunction#close().
        if (this.redisCommandsContainer != null) {
            this.redisCommandsContainer.close();
        }
    }
}
使用
redis中的哈希表保存個各個單詞的詞頻,統計詞頻最大的單詞
package com.jike.flink.examples.redis;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisDataType;
import org.apache.flink.util.Collector;
import java.util.Map;
/**
 * Splits a hash-typed {@code MyRedisRecord} (word -> count strings) into
 * one {@code (word, count)} tuple per hash field.
 */
public class MyMapRedisRecordSplitter implements FlatMapFunction<MyRedisRecord, Tuple2<String, Integer>> {

    @Override
    public void flatMap(MyRedisRecord record, Collector<Tuple2<String, Integer>> out) throws Exception {
        // Only HASH records are expected here (see RedisSource / MyRedisCommand.HGET).
        assert record.getRedisDataType() == RedisDataType.HASH;
        @SuppressWarnings("unchecked")
        Map<String, String> wordCounts = (Map<String, String>) record.getData();
        wordCounts.forEach((word, count) -> out.collect(new Tuple2<>(word, Integer.valueOf(count))));
    }
}
package com.jike.flink.examples.redis;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
/**
 * Example job: reads word frequencies from the Redis hash "flink" via
 * {@link RedisSource} and prints the word with the highest count in each
 * 5-second window.
 */
public class MaxCount {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Connection settings for the Redis instance holding the hash.
        FlinkJedisPoolConfig jedisConfig =
                new FlinkJedisPoolConfig.Builder().setHost("ip").setPort(30420).setPassword("passwd").build();

        DataStreamSource<MyRedisRecord> records =
                env.addSource(new RedisSource(jedisConfig, new MyRedisCommandDescription(MyRedisCommand.HGET, "flink")));

        DataStream<Tuple2<String, Integer>> topWord = records
                .flatMap(new MyMapRedisRecordSplitter())
                .timeWindowAll(Time.milliseconds(5000))
                .maxBy(1);

        topWord.print().setParallelism(1);
        env.execute();
    }
}
結果