calcite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jh...@apache.org
Subject [1/2] calcite git commit: [CALCITE-1598] Pig adapter (Eli Levine)
Date Wed, 01 Mar 2017 19:17:11 GMT
Repository: calcite
Updated Branches:
  refs/heads/master 0372d23b8 -> fdbb81cf2


http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/pig/src/test/java/org/apache/calcite/test/PigRelBuilderStyleTest.java
----------------------------------------------------------------------
diff --git a/pig/src/test/java/org/apache/calcite/test/PigRelBuilderStyleTest.java b/pig/src/test/java/org/apache/calcite/test/PigRelBuilderStyleTest.java
new file mode 100644
index 0000000..abd5ebf
--- /dev/null
+++ b/pig/src/test/java/org/apache/calcite/test/PigRelBuilderStyleTest.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.test;
+
+import org.apache.calcite.adapter.pig.PigAggregate;
+import org.apache.calcite.adapter.pig.PigFilter;
+import org.apache.calcite.adapter.pig.PigRel;
+import org.apache.calcite.adapter.pig.PigRelFactories;
+import org.apache.calcite.adapter.pig.PigRules;
+import org.apache.calcite.adapter.pig.PigTable;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
+import org.apache.calcite.rel.rules.FilterJoinRule;
+import org.apache.calcite.rel.rules.FilterJoinRule.FilterIntoJoinRule;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.tools.FrameworkConfig;
+import org.apache.calcite.tools.Frameworks;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
+
+import org.apache.pig.pigunit.PigTest;
+
+import org.junit.After;
+import org.junit.Test;
+
+import static org.apache.calcite.rel.rules.FilterJoinRule.TRUE_PREDICATE;
+import static org.apache.calcite.sql.fun.SqlStdOperatorTable.EQUALS;
+import static org.apache.calcite.sql.fun.SqlStdOperatorTable.GREATER_THAN;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for the {@code org.apache.calcite.adapter.pig} package that tests the
+ * building of {@link PigRel} relational expressions using {@link RelBuilder} and
+ * associated factories in {@link PigRelFactories}.
+ */
+public class PigRelBuilderStyleTest extends AbstractPigTest {
+
+  @Test
+  public void testScanAndFilter() throws Exception {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .filter(builder.call(GREATER_THAN, builder.field("tc0"), builder.literal("abc"))).build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = FILTER t BY (tc0 > 'abc');",
+        new String[] { "(b,2)", "(c,3)" });
+  }
+
+  @Test
+  public void testImplWithMltipleFilters() {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .filter(
+            builder.and(builder.call(GREATER_THAN, builder.field("tc0"), builder.literal("abc")),
+                builder.call(EQUALS, builder.field("tc1"), builder.literal("3"))))
+        .build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = FILTER t BY (tc0 > 'abc') AND (tc1 == '3');",
+        new String[] { "(c,3)" });
+  }
+
+  @Test
+  public void testImplWithGroupByAndCount() {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .aggregate(builder.groupKey("tc0"), builder.count(false, "c", builder.field("tc1")))
+        .build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = GROUP t BY (tc0);\n"
+            + "t = FOREACH t {\n"
+            + "  GENERATE group AS tc0, COUNT(t.tc1) AS c;\n"
+            + "};",
+        new String[] { "(a,1)", "(b,1)", "(c,1)" });
+  }
+
+  @Test
+  public void testImplWithCountWithoutGroupBy() {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .aggregate(builder.groupKey(), builder.count(false, "c", builder.field("tc0"))).build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = GROUP t ALL;\n"
+            + "t = FOREACH t {\n"
+            + "  GENERATE COUNT(t.tc0) AS c;\n"
+            + "};",
+        new String[] { "(3)" });
+  }
+
+  @Test
+  public void testImplWithGroupByMultipleFields() {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .aggregate(builder.groupKey("tc1", "tc0"), builder.count(false, "c", builder.field("tc1")))
+        .build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = GROUP t BY (tc0, tc1);\n"
+            + "t = FOREACH t {\n"
+            + "  GENERATE group.tc0 AS tc0, group.tc1 AS tc1, COUNT(t.tc1) AS c;\n"
+            + "};",
+        new String[] { "(a,1,1)", "(b,2,1)", "(c,3,1)" });
+  }
+
+  @Test
+  public void testImplWithGroupByCountDistinct() {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t")
+        .aggregate(builder.groupKey("tc1", "tc0"), builder.count(true, "c", builder.field("tc1")))
+        .build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = GROUP t BY (tc0, tc1);\n"
+            + "t = FOREACH t {\n"
+            + "  tc1_DISTINCT = DISTINCT t.tc1;\n"
+            + "  GENERATE group.tc0 AS tc0, group.tc1 AS tc1, COUNT(tc1_DISTINCT) AS c;\n"
+            + "};",
+        new String[] { "(a,1,1)", "(b,2,1)", "(c,3,1)" });
+  }
+
+  @Test
+  public void testImplWithJoin() throws Exception {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t").scan("s")
+        .join(JoinRelType.INNER,
+            builder.equals(builder.field(2, 0, "tc1"), builder.field(2, 1, "sc0")))
+        .filter(builder.call(GREATER_THAN, builder.field("tc0"), builder.literal("a"))).build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = FILTER t BY (tc0 > 'a');\n"
+            + "s = LOAD '"
+            + getFullPathForTestDataFile("data2.txt")
+            + "' USING PigStorage() AS (sc0:chararray, sc1:chararray);\n"
+            + "t = JOIN t BY tc1 , s BY sc0;",
+        new String[] { "(b,2,2,label2)" });
+  }
+
+  @Test
+  public void testImplWithJoinAndGroupBy() throws Exception {
+    final SchemaPlus schema = createTestSchema();
+    final RelBuilder builder = createRelBuilder(schema);
+    final RelNode node = builder.scan("t").scan("s")
+        .join(JoinRelType.LEFT,
+            builder.equals(builder.field(2, 0, "tc1"), builder.field(2, 1, "sc0")))
+        .filter(builder.call(GREATER_THAN, builder.field("tc0"), builder.literal("abc")))
+        .aggregate(builder.groupKey("tc1"), builder.count(false, "c", builder.field("sc1")))
+        .build();
+    final RelNode optimized = optimizeWithVolcano(node);
+    assertScriptAndResults("t", getPigScript(optimized, schema),
+        "t = LOAD '" + getFullPathForTestDataFile("data.txt")
+            + "' USING PigStorage() AS (tc0:chararray, tc1:chararray);\n"
+            + "t = FILTER t BY (tc0 > 'abc');\n"
+            + "s = LOAD '"
+            + getFullPathForTestDataFile("data2.txt")
+            + "' USING PigStorage() AS (sc0:chararray, sc1:chararray);\n"
+            + "t = JOIN t BY tc1 LEFT, s BY sc0;\n"
+            + "t = GROUP t BY (tc1);\n"
+            + "t = FOREACH t {\n"
+            + "  GENERATE group AS tc1, COUNT(t.sc1) AS c;\n"
+            + "};",
+        new String[] { "(2,1)", "(3,0)" });
+  }
+
+  private SchemaPlus createTestSchema() {
+    SchemaPlus result = Frameworks.createRootSchema(false);
+    result.add("t",
+        new PigTable(getFullPathForTestDataFile("data.txt"),
+        new String[] { "tc0", "tc1" }));
+    result.add("s",
+        new PigTable(getFullPathForTestDataFile("data2.txt"),
+        new String[] { "sc0", "sc1" }));
+    return result;
+  }
+
+  private RelBuilder createRelBuilder(SchemaPlus schema) {
+    final FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(schema)
+        .context(PigRelFactories.ALL_PIG_REL_FACTORIES)
+        .build();
+    return RelBuilder.create(config);
+  }
+
+  private RelNode optimizeWithVolcano(RelNode root) {
+    RelOptPlanner planner = getVolcanoPlanner(root);
+    return planner.findBestExp();
+  }
+
+  private RelOptPlanner getVolcanoPlanner(RelNode root) {
+    final RelBuilderFactory builderFactory =
+        RelBuilder.proto(PigRelFactories.ALL_PIG_REL_FACTORIES);
+    final RelOptPlanner planner = root.getCluster().getPlanner(); // VolcanoPlanner
+    for (RelOptRule r : PigRules.ALL_PIG_OPT_RULES) {
+      planner.addRule(r);
+    }
+    planner.removeRule(FilterAggregateTransposeRule.INSTANCE);
+    planner.removeRule(FilterJoinRule.FILTER_ON_JOIN);
+    planner.addRule(
+        new FilterAggregateTransposeRule(PigFilter.class, builderFactory, PigAggregate.class));
+    planner.addRule(new FilterIntoJoinRule(true, builderFactory, TRUE_PREDICATE));
+    planner.setRoot(root);
+    return planner;
+  }
+
+  private void assertScriptAndResults(String relAliasForStore, String script, String expectedScript,
+      String[] expectedResults) {
+    try {
+      assertEquals(expectedScript, script);
+      script = script + "\nSTORE " + relAliasForStore + " INTO 'myoutput';";
+      PigTest pigTest = new PigTest(script.split("[\\r\\n]+"));
+      pigTest.assertOutputAnyOrder(expectedResults);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private String getPigScript(RelNode root, Schema schema) {
+    PigRel.Implementor impl = new PigRel.Implementor();
+    impl.visitChild(0, root);
+    return impl.getScript();
+  }
+
+  @After
+  public void shutdownPigServer() {
+    PigTest.getPigServer().shutdown();
+  }
+}
+
+// End PigRelBuilderStyleTest.java

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/pig/src/test/resources/data.txt
----------------------------------------------------------------------
diff --git a/pig/src/test/resources/data.txt b/pig/src/test/resources/data.txt
new file mode 100644
index 0000000..eb8f732
--- /dev/null
+++ b/pig/src/test/resources/data.txt
@@ -0,0 +1,3 @@
+a	1
+b	2
+c	3

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/pig/src/test/resources/data2.txt
----------------------------------------------------------------------
diff --git a/pig/src/test/resources/data2.txt b/pig/src/test/resources/data2.txt
new file mode 100644
index 0000000..499b46f
--- /dev/null
+++ b/pig/src/test/resources/data2.txt
@@ -0,0 +1,2 @@
+1	label1
+2	label2

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/pig/src/test/resources/model.json
----------------------------------------------------------------------
diff --git a/pig/src/test/resources/model.json b/pig/src/test/resources/model.json
new file mode 100644
index 0000000..851888e
--- /dev/null
+++ b/pig/src/test/resources/model.json
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+{
+  "version": "1.0",
+  "defaultSchema": "PIG",
+  "schemas": [
+    {
+      "name": "PIG",
+      "type": "custom",
+      "factory": "org.apache.calcite.adapter.pig.PigSchemaFactory",
+      "tables": [
+        {
+          "name": "t",
+          "type": "custom",
+          "factory": "org.apache.calcite.adapter.pig.PigTableFactory",
+          "operand": {
+            "file": "data.txt",
+            "columns": ["tc0", "tc1"]
+          }
+        },
+        {
+          "name": "s",
+          "type": "custom",
+          "factory": "org.apache.calcite.adapter.pig.PigTableFactory",
+          "operand": {
+            "file": "data2.txt",
+            "columns": ["sc0", "sc1"]
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9297506..6cda20f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -74,7 +74,7 @@ limitations under the License.
     <guava.version>20.0</guava.version>
     <joda.version>2.8.1</joda.version>
     <h2.version>1.4.185</h2.version>
-    <hadoop.version>2.6.0</hadoop.version>
+    <hadoop.version>2.7.0</hadoop.version>
     <hamcrest.version>1.3</hamcrest.version>
     <hppc.version>0.7.1</hppc.version>
     <hsqldb.version>2.3.1</hsqldb.version>
@@ -108,6 +108,7 @@ limitations under the License.
     <natty.version>0.13</natty.version>
     <opencsv.version>2.3</opencsv.version>
     <oracle-jdbc6-driver.version>11.2.0.2.0</oracle-jdbc6-driver.version>
+    <pig.version>0.16.0</pig.version>
     <aggdesigner.version>6.0</aggdesigner.version>
     <postgresql.version>9.3-1102-jdbc3</postgresql.version>
     <quidem.version>0.8</quidem.version>
@@ -142,6 +143,7 @@ limitations under the License.
     <module>file</module>
     <module>linq4j</module>
     <module>mongodb</module>
+    <module>pig</module>
     <module>piglet</module>
     <module>plus</module>
     <module>spark</module>
@@ -316,6 +318,11 @@ limitations under the License.
         <version>${hadoop.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-client</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.httpcomponents</groupId>
         <artifactId>httpclient</artifactId>
         <version>${httpclient.version}</version>
@@ -326,6 +333,18 @@ limitations under the License.
         <version>${httpcore.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.pig</groupId>
+        <artifactId>pig</artifactId>
+        <version>${pig.version}</version>
+        <classifier>h2</classifier>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.pig</groupId>
+        <artifactId>pigunit</artifactId>
+        <version>${pig.version}</version>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
         <groupId>org.jsoup</groupId>
         <artifactId>jsoup</artifactId>
         <version>${jsoup.version}</version>

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/site/_docs/adapter.md
----------------------------------------------------------------------
diff --git a/site/_docs/adapter.md b/site/_docs/adapter.md
index 819740b..7597fad 100644
--- a/site/_docs/adapter.md
+++ b/site/_docs/adapter.md
@@ -34,11 +34,16 @@ presenting the data as tables within a schema.
 * [File adapter](file.html) (<a href="{{ site.apiRoot }}/org/apache/calcite/adapter/file/package-summary.html">calcite-file</a>)
 * JDBC adapter (part of <a href="{{ site.apiRoot }}/org/apache/calcite/adapter/jdbc/package-summary.html">calcite-core</a>)
 * MongoDB adapter (<a href="{{ site.apiRoot }}/org/apache/calcite/adapter/mongodb/package-summary.html">calcite-mongodb</a>)
+* [Pig adapter](pig.html) (<a href="{{ site.apiRoot }}/org/apache/calcite/adapter/pig/package-summary.html">calcite-pig</a>)
 * Solr cloud adapter (<a href="https://github.com/bluejoe2008/solr-sql">solr-sql</a>)
 * Spark adapter (<a href="{{ site.apiRoot }}/org/apache/calcite/adapter/spark/package-summary.html">calcite-spark</a>)
 * Splunk adapter (<a href="{{ site.apiRoot }}/org/apache/calcite/adapter/splunk/package-summary.html">calcite-splunk</a>)
 * Eclipse Memory Analyzer (MAT) adapter (<a href="https://github.com/vlsi/mat-calcite-plugin">mat-calcite-plugin</a>)
 
+### Other language interfaces
+
+* Piglet (<a href="{{ site.apiRoot }}/org/apache/calcite/piglet/package-summary.html">calcite-piglet</a>)
+runs queries in a subset of <a href="https://pig.apache.org/docs/r0.7.0/piglatin_ref1.html">Pig
+Latin</a>
+
 ## Engines
 
 Many projects and products use Apache Calcite for SQL parsing,

http://git-wip-us.apache.org/repos/asf/calcite/blob/fdbb81cf/site/_docs/pig.md
----------------------------------------------------------------------
diff --git a/site/_docs/pig.md b/site/_docs/pig.md
new file mode 100644
index 0000000..4a85330
--- /dev/null
+++ b/site/_docs/pig.md
@@ -0,0 +1,90 @@
+---
+layout: docs
+title: Pig adapter
+permalink: /docs/pig.html
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+# Overview
+
+The Pig adapter allows you to write queries in SQL and execute them using
+<a href="http://pig.apache.org">Apache Pig</a>.
+
+# A simple example
+
+Let's start with a simple example. First, we need a
+[model definition]({{ site.baseurl }}/docs/model.html),
+as follows.
+
+{% highlight json %}
+{
+  "version": "1.0",
+  "defaultSchema": "PIG",
+  "schemas": [ {
+    "name": "PIG",
+    "type": "custom",
+    "factory": "org.apache.calcite.adapter.pig.PigSchemaFactory",
+    "tables": [ {
+      "name": "t",
+      "type": "custom",
+      "factory": "org.apache.calcite.adapter.pig.PigTableFactory",
+      "operand": {
+        "file": "data.txt",
+        "columns": ["tc0", "tc1"]
+      }
+    }, {
+      "name": "s",
+      "type": "custom",
+      "factory": "org.apache.calcite.adapter.pig.PigTableFactory",
+      "operand": {
+        "file": "data2.txt",
+        "columns": ["sc0", "sc1"]
+      }
+    } ]
+  } ]
+}
+{% endhighlight %}
+
+Now, if you write the SQL query
+
+{% highlight sql %}
+select *
+from "t"
+join "s" on "tc1" = "sc0"
+{% endhighlight %}
+
+the Pig adapter will generate the Pig Latin script
+
+{% highlight sql %}
+t = LOAD 'data.txt' USING PigStorage() AS (tc0:chararray, tc1:chararray);
+s = LOAD 'data2.txt' USING PigStorage() AS (sc0:chararray, sc1:chararray);
+t = JOIN t BY tc1, s BY sc0;
+{% endhighlight %}
+
+which is then executed using Pig's runtime, typically MapReduce on
+<a href="http://hadoop.apache.org/">Apache Hadoop</a>.
+
+# Relationship to Piglet
+
+Calcite has another component called
+<a href="{{ site.apiRoot }}/org/apache/calcite/piglet/package-summary.html">Piglet</a>.
+It allows you to write queries in a subset of Pig Latin,
+and execute them using any applicable Calcite adapter.
+So, Piglet is basically the opposite of the Pig adapter.


Mime
View raw message