spark-commits mailing list archives

From sro...@apache.org
Subject spark git commit: [SPARK-15468][SQL] fix some typos
Date Sun, 22 May 2016 13:10:57 GMT
Repository: spark
Updated Branches:
  refs/heads/master 7920296bf -> 72288fd67


[SPARK-15468][SQL] fix some typos

## What changes were proposed in this pull request?

Fix some typos found while browsing the code.

## How was this patch tested?

N/A; the changes are straightforward typo fixes with no behavior change.

Author: Bo Meng <mengbo@hotmail.com>
Author: bomeng <bmeng@us.ibm.com>

Closes #13246 from bomeng/typo.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/72288fd6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/72288fd6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/72288fd6

Branch: refs/heads/master
Commit: 72288fd67edc00f56e2e47eab2ef58fe4ff8c177
Parents: 7920296
Author: Bo Meng <mengbo@hotmail.com>
Authored: Sun May 22 08:10:54 2016 -0500
Committer: Sean Owen <sowen@cloudera.com>
Committed: Sun May 22 08:10:54 2016 -0500

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala    | 2 +-
 .../catalyst/expressions/codegen/GenerateSafeProjection.scala    | 2 +-
 .../scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala  | 2 +-
 .../scala/org/apache/spark/sql/catalyst/planning/patterns.scala  | 4 ++--
 .../main/scala/org/apache/spark/sql/types/UDTRegistration.scala  | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/72288fd6/core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala b/core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala
index 21ba460..0b9a47c 100644
--- a/core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala
+++ b/core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala
@@ -55,7 +55,7 @@ abstract class AccumulatorV2[IN, OUT] extends Serializable {
 
   /**
   * Returns true if this accumulator has been registered.  Note that all accumulators must be
-   * registered before ues, or it will throw exception.
+   * registered before use, or it will throw exception.
    */
   final def isRegistered: Boolean =
     metadata != null && AccumulatorContext.get(metadata.id).isDefined
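
(For context on the corrected comment: a minimal usage sketch of registering an accumulator
before use, assuming a live SparkContext `sc`; the accumulator name "recordCount" is illustrative.)

import org.apache.spark.util.LongAccumulator

val acc = new LongAccumulator
sc.register(acc, "recordCount")                     // register first: isRegistered becomes true
sc.parallelize(1 to 100).foreach(_ => acc.add(1L))  // safe to use after registration
println(acc.value)                                  // driver-side result: 100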

http://git-wip-us.apache.org/repos/asf/spark/blob/72288fd6/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
index b0b1212..214dc40 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
@@ -48,7 +48,7 @@ object GenerateSafeProjection extends CodeGenerator[Seq[Expression], Projection]
     val tmp = ctx.freshName("tmp")
     val output = ctx.freshName("safeRow")
     val values = ctx.freshName("values")
-    // These expressions could be splitted into multiple functions
+    // These expressions could be split into multiple functions
     ctx.addMutableState("Object[]", values, s"this.$values = null;")
 
     val rowClass = classOf[GenericInternalRow].getName
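
(Background for the corrected comment: generated Java methods must stay under the JVM's
64KB-of-bytecode-per-method limit, so codegen may split expression evaluation across several
helper functions. A simplified, hypothetical sketch of the chunking idea, not the actual
CodegenContext API:)

// Chunk per-field assignment statements into numbered helper functions so that
// no single generated method exceeds JVM size limits.
def splitIntoFunctions(statements: Seq[String], chunkSize: Int = 100): String = {
  statements.grouped(chunkSize).zipWithIndex.map { case (chunk, i) =>
    s"private void apply_$i(InternalRow i) {\n${chunk.mkString("\n")}\n}"
  }.mkString("\n")
}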

http://git-wip-us.apache.org/repos/asf/spark/blob/72288fd6/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index cace026..a13c03a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -771,7 +771,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {
   * ******************************************************************************************** */
   /**
   * Create an expression from the given context. This method just passes the context on to the
-   * vistor and only takes care of typing (We assume that the visitor returns an Expression here).
+   * visitor and only takes care of typing (We assume that the visitor returns an Expression here).
    */
   protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx)
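
(For reference, the typed-visit plumbing the corrected comment describes boils down to a
cast over the generic ANTLR visit, as in the sketch below; treat it as an approximation of
the surrounding code rather than a verbatim quote:)

import org.antlr.v4.runtime.ParserRuleContext

protected def typedVisit[T](ctx: ParserRuleContext): T = {
  ctx.accept(this).asInstanceOf[T]  // the visitor returns AnyRef; the caller fixes the type
}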
 

http://git-wip-us.apache.org/repos/asf/spark/blob/72288fd6/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
index 0065619..f42e67c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
@@ -227,8 +227,8 @@ object IntegerIndex {
 *  - Unnamed grouping expressions are named so that they can be referred to across phases of
 *    aggregation
 *  - Aggregations that appear multiple times are deduplicated.
- *  - The compution of the aggregations themselves is separated from the final result. For example,
- *    the `count` in `count + 1` will be split into an [[AggregateExpression]] and a final
+ *  - The computation of the aggregations themselves is separated from the final result. For
+ *    example, the `count` in `count + 1` will be split into an [[AggregateExpression]] and a final
  *    computation that computes `count.resultAttribute + 1`.
  */
 object PhysicalAggregation {
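
(To illustrate the corrected comment with the public API: in a hypothetical query like the
one below, assuming a DataFrame `df` with columns k and v and spark.implicits._ in scope,
the `count` is planned as an aggregate and the `+ 1` as a final computation over its
result attribute:)

import org.apache.spark.sql.functions.count

val result = df.groupBy($"k").agg(count($"v") + 1)  // aggregate: count(v); final: count + 1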

http://git-wip-us.apache.org/repos/asf/spark/blob/72288fd6/sql/catalyst/src/main/scala/org/apache/spark/sql/types/UDTRegistration.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/UDTRegistration.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/UDTRegistration.scala
index 0f24e51..20ec75c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/UDTRegistration.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/UDTRegistration.scala
@@ -27,7 +27,7 @@ import org.apache.spark.util.Utils
  * This object keeps the mappings between user classes and their User Defined Types (UDTs).
  * Previously we use the annotation `SQLUserDefinedType` to register UDTs for user classes.
  * However, by doing this, we add SparkSQL dependency on user classes. This object provides
- * alterntive approach to register UDTs for user classes.
+ * alternative approach to register UDTs for user classes.
  */
 private[spark]
 object UDTRegistration extends Serializable with Logging {
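
(For context, the alternative registration path mentioned in the corrected comment takes
fully-qualified class names; a sketch with hypothetical class names follows. Note the object
is private[spark], so it is only reachable from code compiled into an org.apache.spark
package:)

// Map a user class to its UserDefinedType implementation by name.
UDTRegistration.register(
  "com.example.MyPoint",     // user class (hypothetical)
  "com.example.MyPointUDT")  // its UserDefinedType subclass (hypothetical)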

