flink-commits mailing list archives

From: hsapu...@apache.org
Subject: flink git commit: [FLINK-1460] fix typos
Date: Fri, 30 Jan 2015 17:51:36 GMT
Repository: flink
Updated Branches:
  refs/heads/master 9849990d9 -> f7e84c157


[FLINK-1460] fix typos

Fix some typos. Also fix some inconsistent uses of **partition operator** and **partitioning operator** in the codebase.

Author: Shuo Xiang <shuoxiangpub@gmail.com>
Author: Shuo Xiang <sxiang@pinterest.com>

Closes #346 from coderxiang/typo and squashes the following commits:

acf5274 [Shuo Xiang] change partitioningoperator to partitionoperator
89b460a [Shuo Xiang] fix typos


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f7e84c15
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f7e84c15
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f7e84c15

Branch: refs/heads/master
Commit: f7e84c157bcb272625b9859d74b85611c881d490
Parents: 9849990
Author: Shuo Xiang <shuoxiangpub@gmail.com>
Authored: Fri Jan 30 09:51:31 2015 -0800
Committer: Henry Saputra <henry.saputra@gmail.com>
Committed: Fri Jan 30 09:51:31 2015 -0800

----------------------------------------------------------------------
 .../compiler/java/PartitionOperatorTest.java    | 70 ++++++++++++++++++++
 .../compiler/java/PartitioningOperatorTest.java | 70 --------------------
 .../examples/scala/ml/LinearRegression.scala    |  4 +-
 .../PartitionOperatorTranslationTest.scala      |  2 +-
 4 files changed, 73 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/f7e84c15/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitionOperatorTest.java
----------------------------------------------------------------------
diff --git a/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitionOperatorTest.java
b/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitionOperatorTest.java
new file mode 100644
index 0000000..3104493
--- /dev/null
+++ b/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitionOperatorTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.compiler.java;
+
+import static org.junit.Assert.*;
+
+import java.util.Collections;
+
+import org.apache.flink.api.common.Plan;
+import org.apache.flink.api.common.functions.Partitioner;
+import org.apache.flink.api.java.DataSet;
+import org.apache.flink.api.java.ExecutionEnvironment;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.compiler.CompilerTestBase;
+import org.apache.flink.compiler.plan.OptimizedPlan;
+import org.apache.flink.compiler.plan.SingleInputPlanNode;
+import org.apache.flink.compiler.plan.SinkPlanNode;
+import org.apache.flink.compiler.testfunctions.IdentityGroupReducer;
+import org.apache.flink.runtime.operators.shipping.ShipStrategyType;
+import org.junit.Test;
+
+@SuppressWarnings("serial")
+public class PartitionOperatorTest extends CompilerTestBase {
+
+	@Test
+	public void testPartitionOperatorPreservesFields() {
+		try {
+			ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
+			
+			DataSet<Tuple2<Long, Long>> data = env.fromCollection(Collections.singleton(new Tuple2<Long, Long>(0L, 0L)));
+			
+			data.partitionCustom(new Partitioner<Long>() {
+					public int partition(Long key, int numPartitions) { return key.intValue(); }
+				}, 1)
+				.groupBy(1)
+				.reduceGroup(new IdentityGroupReducer<Tuple2<Long,Long>>())
+				.print();
+			
+			Plan p = env.createProgramPlan();
+			OptimizedPlan op = compileNoStats(p);
+			
+			SinkPlanNode sink = op.getDataSinks().iterator().next();
+			SingleInputPlanNode reducer = (SingleInputPlanNode) sink.getInput().getSource();
+			SingleInputPlanNode partitioner = (SingleInputPlanNode) reducer.getInput().getSource();
+
+			assertEquals(ShipStrategyType.FORWARD, reducer.getInput().getShipStrategy());
+			assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner.getInput().getShipStrategy());
+		}
+		catch (Exception e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		}
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/f7e84c15/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitioningOperatorTest.java
----------------------------------------------------------------------
diff --git a/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitioningOperatorTest.java
b/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitioningOperatorTest.java
deleted file mode 100644
index 26185c7..0000000
--- a/flink-compiler/src/test/java/org/apache/flink/compiler/java/PartitioningOperatorTest.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.compiler.java;
-
-import static org.junit.Assert.*;
-
-import java.util.Collections;
-
-import org.apache.flink.api.common.Plan;
-import org.apache.flink.api.common.functions.Partitioner;
-import org.apache.flink.api.java.DataSet;
-import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.compiler.CompilerTestBase;
-import org.apache.flink.compiler.plan.OptimizedPlan;
-import org.apache.flink.compiler.plan.SingleInputPlanNode;
-import org.apache.flink.compiler.plan.SinkPlanNode;
-import org.apache.flink.compiler.testfunctions.IdentityGroupReducer;
-import org.apache.flink.runtime.operators.shipping.ShipStrategyType;
-import org.junit.Test;
-
-@SuppressWarnings("serial")
-public class PartitioningOperatorTest extends CompilerTestBase {
-
-	@Test
-	public void testPartitiongOperatorPreservesFields() {
-		try {
-			ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-			
-			DataSet<Tuple2<Long, Long>> data = env.fromCollection(Collections.singleton(new Tuple2<Long, Long>(0L, 0L)));
-			
-			data.partitionCustom(new Partitioner<Long>() {
-					public int partition(Long key, int numPartitions) { return key.intValue(); }
-				}, 1)
-				.groupBy(1)
-				.reduceGroup(new IdentityGroupReducer<Tuple2<Long,Long>>())
-				.print();
-			
-			Plan p = env.createProgramPlan();
-			OptimizedPlan op = compileNoStats(p);
-			
-			SinkPlanNode sink = op.getDataSinks().iterator().next();
-			SingleInputPlanNode reducer = (SingleInputPlanNode) sink.getInput().getSource();
-			SingleInputPlanNode partitioner = (SingleInputPlanNode) reducer.getInput().getSource();
-
-			assertEquals(ShipStrategyType.FORWARD, reducer.getInput().getShipStrategy());
-			assertEquals(ShipStrategyType.PARTITION_CUSTOM, partitioner.getInput().getShipStrategy());
-		}
-		catch (Exception e) {
-			e.printStackTrace();
-			fail(e.getMessage());
-		}
-	}
-}

http://git-wip-us.apache.org/repos/asf/flink/blob/f7e84c15/flink-examples/flink-scala-examples/src/main/scala/org/apache/flink/examples/scala/ml/LinearRegression.scala
----------------------------------------------------------------------
diff --git a/flink-examples/flink-scala-examples/src/main/scala/org/apache/flink/examples/scala/ml/LinearRegression.scala
b/flink-examples/flink-scala-examples/src/main/scala/org/apache/flink/examples/scala/ml/LinearRegression.scala
index ea96e7c..508d677 100644
--- a/flink-examples/flink-scala-examples/src/main/scala/org/apache/flink/examples/scala/ml/LinearRegression.scala
+++ b/flink-examples/flink-scala-examples/src/main/scala/org/apache/flink/examples/scala/ml/LinearRegression.scala
@@ -31,8 +31,8 @@ import scala.collection.JavaConverters._
  * This example implements a basic Linear Regression  to solve the y = theta0 + theta1*x problem
  * using batch gradient descent algorithm.
  *
- * Linear Regression with BGD(batch gradient descent) algorithm is an iterative clustering
- * algorithm and works as follows:
+ * Linear Regression with BGD(batch gradient descent) algorithm is an iterative algorithm and
+ * works as follows:
  *
  * Giving a data set and target set, the BGD try to find out the best parameters for the data set
  * to fit the target set.
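
For reference (this is not part of the commit): the docstring above describes batch gradient descent for the model y = theta0 + theta1*x, where each iteration computes the loss gradients over the whole batch and updates both parameters. Below is a minimal, framework-free Scala sketch of that update loop under stated assumptions; the object name BgdSketch, the learning rate, and the iteration count are illustrative only, and the actual Flink example distributes the equivalent per-iteration computation over a DataSet using bulk iterations.

// Minimal, framework-free sketch of the BGD loop described in the docstring.
// Assumed/illustrative names: BgdSketch, fit, learningRate, iterations.
object BgdSketch {

  def fit(data: Seq[(Double, Double)],
          learningRate: Double = 0.5,
          iterations: Int = 1000): (Double, Double) = {
    var theta0 = 0.0
    var theta1 = 0.0
    val n = data.size.toDouble
    for (_ <- 1 to iterations) {
      // Batch gradients of the cost (1/2n) * sum of squared residuals,
      // computed over the full data set in every iteration.
      val grad0 = data.map { case (x, y) => (theta0 + theta1 * x) - y }.sum / n
      val grad1 = data.map { case (x, y) => ((theta0 + theta1 * x) - y) * x }.sum / n
      theta0 -= learningRate * grad0
      theta1 -= learningRate * grad1
    }
    (theta0, theta1)
  }

  def main(args: Array[String]): Unit = {
    // Points sampled from y = 1 + 2*x; the fit should come out close to (1.0, 2.0).
    val data = (0 to 10).map(i => (i / 10.0, 1.0 + 2.0 * (i / 10.0)))
    println(fit(data))
  }
}

Roughly speaking, the Flink example expresses the same per-iteration step with the current parameters broadcast to the data, per-point gradient contributions reduced, and the parameters updated before the next iteration.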

http://git-wip-us.apache.org/repos/asf/flink/blob/f7e84c15/flink-tests/src/test/scala/org/apache/flink/api/scala/compiler/PartitionOperatorTranslationTest.scala
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/scala/org/apache/flink/api/scala/compiler/PartitionOperatorTranslationTest.scala
b/flink-tests/src/test/scala/org/apache/flink/api/scala/compiler/PartitionOperatorTranslationTest.scala
index a83d728..6eb193d 100644
--- a/flink-tests/src/test/scala/org/apache/flink/api/scala/compiler/PartitionOperatorTranslationTest.scala
+++ b/flink-tests/src/test/scala/org/apache/flink/api/scala/compiler/PartitionOperatorTranslationTest.scala
@@ -29,7 +29,7 @@ import org.apache.flink.api.common.functions.Partitioner
 class PartitionOperatorTranslationTest extends CompilerTestBase {
 
   @Test
-  def testPartitiongOperatorPreservesFields() {
+  def testPartitionOperatorPreservesFields() {
     try {
       val env = ExecutionEnvironment.getExecutionEnvironment
       

