package com.flink.test;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.Types;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import java.util.Arrays;
public class Table1 {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.getTableEnvironment(env);

        DataStream<Tuple2<Integer, String>> stream1 = env.fromCollection(Arrays.asList(
                new Tuple2<>(1, "hello"),
                new Tuple2<>(2, "hello")
        ));

        // Map the stream's tuple fields onto the table schema
        Table table1 = tEnv.fromDataStream(stream1, "count1, word");
        // Register the table
        tEnv.registerTable("table1", table1);

        /**
         * Table API
         */
        Table tapiResult = tEnv.scan("table1").select("count1");
        // toRetractStream can always be used: it encodes inserts and deletes with a boolean flag
        // (see the aggregation sketch after the output below for a query that actually retracts)
        DataStream<Tuple2<Boolean, Integer>> dsRow = tEnv.toRetractStream(tapiResult, Integer.class);
        dsRow.print();

        /**
         * SQL API
         */
        TupleTypeInfo<Tuple2<Integer, String>> tupleType = new TupleTypeInfo<>(
                Types.INT(),
                Types.STRING());
        Table sqlResult = tEnv.sqlQuery("SELECT count1, word FROM table1");
        // toAppendStream is append-only: previously emitted results are never updated
        DataStream<Tuple2<Integer, String>> dsRow1 = tEnv.toAppendStream(sqlResult, tupleType);
        dsRow1.print();

        env.execute();
    }
}
Running the code prints:
1> 2
2> 1
2> (1,hello)
1> (2,hello)
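The two conversion modes only differ once a query updates rows it has already emitted. As a rough sketch (not part of the program above), the grouped aggregation below, run against the same registered table1, can only be converted with toRetractStream; toAppendStream would reject it because the count for "hello" changes from 1 to 2. The wordCount/retractStream names are illustrative, and Row needs an extra import of org.apache.flink.types.Row.

        // Sketch only: reuses the tEnv and table1 registered in the main method above
        Table wordCount = tEnv.sqlQuery("SELECT word, COUNT(*) AS cnt FROM table1 GROUP BY word");
        // Each element carries a boolean flag: true = add this row, false = retract a previously emitted row
        DataStream<Tuple2<Boolean, Row>> retractStream = tEnv.toRetractStream(wordCount, Row.class);
        retractStream.print();
        // For the two "hello" records the printed pattern is roughly:
        // (true,hello,1)  (false,hello,1)  (true,hello,2)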
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>FlinkDataHandle</groupId>
    <artifactId>FlinkDataHandle</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>kafka-flink-hbase</name>
    <url>http://maven.apache.org</url>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <hbase.version>1.2.0</hbase.version>
        <flink.version>1.7.2</flink.version>
        <!-- Scala binary suffix referenced by the Flink artifacts below (Flink 1.7.x also ships _2.12 builds) -->
        <scala.compat.version>2.11</scala.compat.version>
    </properties>
    <repositories>
        <!-- Repositories from which this project may download dependencies -->
        <repository>
            <id>aliyun</id>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
        </repository>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
        <repository>
            <id>jboss</id>
            <url>http://repository.jboss.org/nexus/content/groups/public</url>
        </repository>
    </repositories>
    <dependencies>
        <!-- Flink DataStream API for Java -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_${scala.compat.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <!-- Flink Table & SQL API (single flink-table module in 1.7.x) -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table_${scala.compat.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <!-- Scala streaming API; the Table API in 1.7.x also needs this module on the classpath -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_${scala.compat.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0</version>
        </dependency>
    </dependencies>
</project>