spark-commits mailing list archives

From andrewo...@apache.org
Subject spark git commit: [MINOR][MLLIB][STREAMING][SQL] Fix typos
Date Wed, 25 May 2016 17:54:22 GMT
Repository: spark
Updated Branches:
  refs/heads/master d6d3e5071 -> 02c8072ee


[MINOR][MLLIB][STREAMING][SQL] Fix typos

Fixed typos in the source code of the [mllib], [streaming], and [SQL] components.

No tests added; the changes are typo-only and self-evident.

Author: lfzCarlosC <lfz.carlos@gmail.com>

Closes #13298 from lfzCarlosC/master.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/02c8072e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/02c8072e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/02c8072e

Branch: refs/heads/master
Commit: 02c8072eea72425e89256347e1f373a3e76e6eba
Parents: d6d3e50
Author: lfzCarlosC <lfz.carlos@gmail.com>
Authored: Wed May 25 10:53:53 2016 -0700
Committer: Andrew Or <andrew@databricks.com>
Committed: Wed May 25 10:53:57 2016 -0700

----------------------------------------------------------------------
 mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala    | 2 +-
 .../org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala | 2 +-
 .../spark/sql/catalyst/plans/logical/basicLogicalOperators.scala | 2 +-
 .../org/apache/spark/sql/execution/vectorized/ColumnVector.java  | 2 +-
 .../spark/sql/execution/streaming/state/StateStoreSuite.scala    | 2 +-
 sql/hive-thriftserver/if/TCLIService.thrift                      | 4 ++--
 .../java/org/apache/hive/service/ServiceStateChangeListener.java | 2 +-
 .../java/org/apache/hive/service/cli/operation/SQLOperation.java | 2 +-
 .../apache/hive/service/cli/session/HiveSessionHookContext.java  | 2 +-
 .../main/scala/org/apache/spark/sql/hive/HiveSessionState.scala  | 2 +-
 .../apache/spark/streaming/util/WriteAheadLogRecordHandle.java   | 2 +-
 11 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
index 9457c6e..bb4b37e 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
@@ -204,7 +204,7 @@ private object IDFModel {
    * Transforms a term frequency (TF) vector to a TF-IDF vector with a IDF vector
    *
    * @param idf an IDF vector
-   * @param v a term frequence vector
+   * @param v a term frequency vector
    * @return a TF-IDF vector
    */
   def transform(idf: Vector, v: Vector): Vector = {
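
For context, the method documented here scales each term-frequency entry by its
IDF weight. A minimal sketch of the dense case, assuming only the mllib linalg
Vector types (the helper name tfidfDense is hypothetical, not the code in this
file):

    import org.apache.spark.mllib.linalg.{Vector, Vectors}

    // Element-wise tf * idf; clone so the input vector is not mutated.
    def tfidfDense(idf: Vector, v: Vector): Vector = {
      val values = v.toArray.clone()
      var i = 0
      while (i < values.length) {
        values(i) *= idf(i)
        i += 1
      }
      Vectors.dense(values)
    }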

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
index 9748fbf..c3de5d7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
@@ -45,7 +45,7 @@ import org.apache.spark.rdd.RDD
 * many elements are in each partition. Once these three values have been returned for every
 * partition, we can collect and operate locally. Locally, we can now adjust each distance by the
 * appropriate constant (the cumulative sum of number of elements in the prior partitions divided by
- * thedata set size). Finally, we take the maximum absolute value, and this is the statistic.
+ * the data set size). Finally, we take the maximum absolute value, and this is the statistic.
  */
 private[stat] object KolmogorovSmirnovTest extends Logging {
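
To make the local step described in that comment concrete, here is a
self-contained sketch of the adjustment: each partition reports its extreme
local deviations and element count, and the driver shifts each deviation by
(elements in prior partitions) / n before taking the global maximum. The case
class and names are made up for illustration:

    case class PartStats(minDist: Double, maxDist: Double, count: Long)

    def ksStatistic(parts: Seq[PartStats]): Double = {
      val n = parts.map(_.count).sum.toDouble
      // cumulative count of elements in all partitions before each one
      val priorCounts = parts.map(_.count).scanLeft(0L)(_ + _)
      parts.zip(priorCounts).map { case (p, prior) =>
        val shift = prior / n  // the "appropriate constant" from the comment
        math.max(math.abs(p.minDist + shift), math.abs(p.maxDist + shift))
      }.max
    }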
 

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index 0a9250b..8b7e21b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -591,7 +591,7 @@ case class Expand(
   }
 
   // This operator can reuse attributes (for example making them null when doing a roll up) so
-  // the contraints of the child may no longer be valid.
+  // the constraints of the child may no longer be valid.
   override protected def validConstraints: Set[Expression] = Set.empty[Expression]
 }
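
For context on the comment being fixed: in a rollup, Expand emits extra rows in
which grouping attributes are replaced by null, so a child constraint such as
IsNotNull on a grouping column would be wrong for the output. A hedged
DataFrame illustration, assuming an active SparkSession named spark (the
column name is made up):

    import spark.implicits._
    // "g" is never null in df, yet the rollup subtotal row carries g = null,
    // so constraints from the child cannot be propagated through Expand.
    val df = spark.range(4).withColumn("g", $"id" % 2)
    df.rollup($"g").count().show()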
 

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
index e7dccd1..3f94255 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
@@ -504,7 +504,7 @@ public abstract class ColumnVector implements AutoCloseable {
 
   /**
    * Returns a utility object to get structs.
-   * provided to keep API compabilitity with InternalRow for code generation
+   * provided to keep API compatibility with InternalRow for code generation
    */
   public ColumnarBatch.Row getStruct(int rowId, int size) {
     resultStruct.rowId = rowId;

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
index f8f8bc7..984b84f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
@@ -188,7 +188,7 @@ class StateStoreSuite extends SparkFunSuite with BeforeAndAfter with PrivateMeth
       provider.getStore(-1)
     }
 
-    // Prepare some data in the stoer
+    // Prepare some data in the store
     val store = provider.getStore(0)
     put(store, "a", 1)
     assert(store.commit() === 1)

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/hive-thriftserver/if/TCLIService.thrift
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/if/TCLIService.thrift b/sql/hive-thriftserver/if/TCLIService.thrift
index baf583f..7cd6fa3 100644
--- a/sql/hive-thriftserver/if/TCLIService.thrift
+++ b/sql/hive-thriftserver/if/TCLIService.thrift
@@ -661,7 +661,7 @@ union TGetInfoValue {
 // The function returns general information about the data source
 // using the same keys as ODBC.
 struct TGetInfoReq {
-  // The sesssion to run this request against
+  // The session to run this request against
   1: required TSessionHandle sessionHandle
 
   2: required TGetInfoType infoType
@@ -1032,7 +1032,7 @@ enum TFetchOrientation {
   FETCH_PRIOR,
 
   // Return the rowset at the given fetch offset relative
-  // to the curren rowset.
+  // to the current rowset.
   // NOT SUPPORTED
   FETCH_RELATIVE,
 

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
index 16ad9a9..d1aadad 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
@@ -29,7 +29,7 @@ public interface ServiceStateChangeListener {
    * have changed state before this callback is invoked.
    *
    * This operation is invoked on the thread that initiated the state change,
-   * while the service itself in in a sychronized section.
+   * while the service itself in in a synchronized section.
    * <ol>
    *   <li>Any long-lived operation here will prevent the service state
    *   change from completing in a timely manner.</li>
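
Since the callback runs while the service holds its synchronization lock, a
listener that needs to do non-trivial work should hand off to another thread.
A hedged Scala sketch (the class name and executor choice are made up; it
assumes the Service interface exposes getServiceState, as in the Hive service
framework):

    import java.util.concurrent.Executors
    import org.apache.hive.service.{Service, ServiceStateChangeListener}

    class AsyncStateListener extends ServiceStateChangeListener {
      private val pool = Executors.newSingleThreadExecutor()
      // Return quickly so the service's synchronized section is released.
      override def stateChanged(service: Service): Unit = {
        pool.submit(new Runnable {
          override def run(): Unit =
            println(s"service moved to state ${service.getServiceState}")
        })
      }
    }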

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
index a354054..5014ced 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -456,7 +456,7 @@ public class SQLOperation extends ExecuteStatementOperation {
   private HiveConf getConfigForOperation() throws HiveSQLException {
     HiveConf sqlOperationConf = getParentSession().getHiveConf();
     if (!getConfOverlay().isEmpty() || shouldRunAsync()) {
-      // clone the partent session config for this query
+      // clone the parent session config for this query
       sqlOperationConf = new HiveConf(sqlOperationConf);
 
       // apply overlay query specific settings, if any
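
The pattern in this hunk is copy-on-write configuration: the parent session's
HiveConf is cloned only when per-query overrides exist, so overlay settings
never leak into the shared session config. A hedged sketch of the same idea
(the helper name confFor is hypothetical):

    import org.apache.hadoop.hive.conf.HiveConf

    def confFor(session: HiveConf, overlay: Map[String, String]): HiveConf =
      if (overlay.isEmpty) session
      else {
        val copy = new HiveConf(session)  // clone; never mutate the parent
        overlay.foreach { case (k, v) => copy.set(k, v) }
        copy
      }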

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
index 0a10dba..c56a107 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 /**
  * HiveSessionHookContext.
  * Interface passed to the HiveServer2 session hook execution. This enables
- * the hook implementation to accesss session config, user and session handle
+ * the hook implementation to access session config, user and session handle
  */
 public interface HiveSessionHookContext {
 

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 46579ec..081d85a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -143,7 +143,7 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
    * converted to a data source table, using the data source set by spark.sql.sources.default.
    * The table in CTAS statement will be converted when it meets any of the following conditions:
   *   - The CTAS does not specify any of a SerDe (ROW FORMAT SERDE), a File Format (STORED AS), or
-   *     a Storage Hanlder (STORED BY), and the value of hive.default.fileformat in hive-site.xml
+   *     a Storage Handler (STORED BY), and the value of hive.default.fileformat in hive-site.xml
    *     is either TextFile or SequenceFile.
   *   - The CTAS statement specifies TextFile (STORED AS TEXTFILE) as the file format and no SerDe
    *     is specified (no ROW FORMAT SERDE clause).
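
To make the first condition concrete, a hedged pair of examples (table names
made up, assuming an active SparkSession named spark): the first CTAS names no
SerDe, file format, or storage handler and so can be converted; the second
pins a SerDe and is left as a Hive table.

    // Eligible for conversion to a data source table.
    spark.sql("CREATE TABLE t1 AS SELECT 1 AS id")

    // Not eligible: an explicit ROW FORMAT SERDE is given.
    spark.sql("""
      CREATE TABLE t2
      ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
      STORED AS TEXTFILE
      AS SELECT 1 AS id""")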

http://git-wip-us.apache.org/repos/asf/spark/blob/02c8072e/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
----------------------------------------------------------------------
diff --git a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
index 662889e..3c5cc7e 100644
--- a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
+++ b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
@@ -23,7 +23,7 @@ package org.apache.spark.streaming.util;
  * This abstract class represents a handle that refers to a record written in a
  * {@link org.apache.spark.streaming.util.WriteAheadLog WriteAheadLog}.
  * It must contain all the information necessary for the record to be read and returned by
- * an implemenation of the WriteAheadLog class.
+ * an implementation of the WriteAheadLog class.
  *
  * @see org.apache.spark.streaming.util.WriteAheadLog
  */
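
As an illustration of "all the information necessary", a hedged sketch of what
a file-backed handle could carry (the case class is hypothetical; Spark's
file-based log keeps a similar segment of path, offset, and length):

    import org.apache.spark.streaming.util.WriteAheadLogRecordHandle

    // Enough information to locate and read the record back later.
    case class FileSegmentHandle(path: String, offset: Long, length: Int)
      extends WriteAheadLogRecordHandle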


