singa-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wan...@apache.org
Subject [04/19] incubator-singa git commit: SINGA-100 Implement layers using CUDNN for GPU training
Date Wed, 16 Dec 2015 12:11:36 GMT
SINGA-100 Implement layers using CUDNN for GPU training

* Add cudnn layers, including convolution, pooling, lrn, and activation (relu, sigmoid, tanh)
* move declarations of layers from single file into <category>_layer.h under include/singa/neuralnet/

TODO compile the code
Lines starting with '#' will be ignored, and an empty message aborts the commit.


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/af1bf509
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/af1bf509
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/af1bf509

Branch: refs/heads/master
Commit: af1bf509d7c7cb5b3d597fc0ff59fc940187e3a4
Parents: 1981874
Author: Wei Wang <wangwei@comp.nus.edu.sg>
Authored: Wed Nov 4 21:02:15 2015 +0800
Committer: Wei Wang <wangwei@comp.nus.edu.sg>
Committed: Fri Dec 11 11:01:29 2015 +0800

----------------------------------------------------------------------
 include/singa/neuralnet/connection_layer.h      | 127 ++++++
 .../singa/neuralnet/connection_layer/bridge.h   |  78 ----
 .../singa/neuralnet/connection_layer/concate.h  |  44 --
 include/singa/neuralnet/input_layer.h           | 305 ++++++++++++++
 include/singa/neuralnet/input_layer/csv.h       |  55 ---
 .../singa/neuralnet/input_layer/deprecated.h    | 171 --------
 .../neuralnet/input_layer/image_preprocess.h    |  47 ---
 include/singa/neuralnet/input_layer/prefetch.h  |  50 ---
 include/singa/neuralnet/input_layer/record.h    |  56 ---
 include/singa/neuralnet/input_layer/store.h     |  89 ----
 include/singa/neuralnet/layer.h                 |  22 +-
 include/singa/neuralnet/loss_layer.h            |  56 +++
 include/singa/neuralnet/loss_layer/euclidean.h  |  43 --
 include/singa/neuralnet/loss_layer/softmax.h    |  59 ---
 include/singa/neuralnet/neuron_layer.h          | 410 +++++++++++++++++++
 include/singa/neuralnet/neuron_layer/argsort.h  |  52 ---
 .../singa/neuralnet/neuron_layer/convolution.h  |  68 ---
 include/singa/neuralnet/neuron_layer/dropout.h  |  46 ---
 .../neuralnet/neuron_layer/inner_product.h      |  49 ---
 include/singa/neuralnet/neuron_layer/lrn.h      |  56 ---
 include/singa/neuralnet/neuron_layer/pooling.h  |  56 ---
 include/singa/neuralnet/neuron_layer/relu.h     |  39 --
 include/singa/neuralnet/neuron_layer/softmax.h  |  40 --
 include/singa/neuralnet/neuron_layer/stanh.h    |  43 --
 include/singa/neuralnet/output_layer.h          |  72 ++++
 include/singa/neuralnet/output_layer/csv.h      |  44 --
 include/singa/neuralnet/output_layer/record.h   |  42 --
 src/neuralnet/connection_layer/bridge.cc        |   1 +
 src/neuralnet/connection_layer/concate.cc       |   4 +-
 src/neuralnet/connection_layer/slice.cc         |   2 +-
 src/neuralnet/connection_layer/split.cc         |   2 +-
 src/neuralnet/input_layer/csv.cc                |   2 +-
 src/neuralnet/input_layer/deprecated.cc         |   2 +-
 src/neuralnet/input_layer/image_preprocess.cc   |   2 +-
 src/neuralnet/input_layer/prefetch.cc           |   2 +-
 src/neuralnet/input_layer/record.cc             |   2 +-
 src/neuralnet/input_layer/store.cc              |   2 +-
 src/neuralnet/loss_layer/euclidean.cc           |   2 +-
 src/neuralnet/loss_layer/softmax.cc             |   2 +-
 src/neuralnet/neuron_layer/argsort.cc           |   2 +-
 src/neuralnet/neuron_layer/convolution.cc       |  68 ++-
 src/neuralnet/neuron_layer/cudnn_activation.cu  | 100 +++++
 src/neuralnet/neuron_layer/cudnn_convolution.cu | 205 ++++++++++
 src/neuralnet/neuron_layer/cudnn_lrn.cu         |  95 +++++
 src/neuralnet/neuron_layer/cudnn_pooling.cu     |  95 +++++
 src/neuralnet/neuron_layer/cudnn_softmax.cu     |  75 ++++
 src/neuralnet/neuron_layer/dropout.cc           |   2 +-
 src/neuralnet/neuron_layer/inner_product.cc     |   2 +-
 src/neuralnet/neuron_layer/lrn.cc               |   2 +-
 src/neuralnet/neuron_layer/pooling.cc           |   2 +-
 src/neuralnet/neuron_layer/rbm.cc               |   2 +-
 src/neuralnet/neuron_layer/relu.cc              |   2 +-
 src/neuralnet/neuron_layer/sigmoid.cc           |   2 +-
 src/neuralnet/neuron_layer/softmax.cc           |  11 +-
 src/neuralnet/neuron_layer/stanh.cc             |   5 +-
 src/neuralnet/output_layer/csv.cc               |   2 +-
 src/neuralnet/output_layer/record.cc            |   2 +-
 src/proto/job.proto                             |  15 +-
 58 files changed, 1644 insertions(+), 1289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/connection_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer.h b/include/singa/neuralnet/connection_layer.h
new file mode 100644
index 0000000..0b14a94
--- /dev/null
+++ b/include/singa/neuralnet/connection_layer.h
@@ -0,0 +1,127 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_CONNECTION_LAYER_H_
+#define SINGA_NEURALNET_CONNECTION_LAYER_H_
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "singa/comm/socket.h"
+#include "singa/neuralnet/layer.h"
+
+namespace singa {
+
+class BridgeLayer : public ConnectionLayer {
+ public:
+  void set_ready(bool a) { ready_ = a; }
+  bool ready() const { return ready_; }
+  // Bind the layer with dealer instance by worker at runtime
+  void MakePaired(Layer* pair, int grp_id, Dealer* dealer,
+                  std::unordered_map<std::string, Layer*>* name2bridge);
+  // Send blobs to other workers due to model partitions
+  void SendBlobs(bool handle_data);
+  // Receive blobs from other workers due to model partitions;
+  void ReceiveBlobs(bool handle_data);
+
+ protected:
+  //!< true if received grad from BridgeDstLayer
+  bool ready_ = false;
+  int group_id_ = 0;
+  Layer* pair_ = nullptr;
+  Dealer* dealer_ = nullptr;
+  std::unordered_map<std::string, Layer*>* name2bridge_ = nullptr;
+};
+
+/**
+ * For sending data to layer on other threads which may resident on other nodes
+ * due to layer/data partition.
+ */
+class BridgeSrcLayer : public BridgeLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/**
+ * For recv data from layer on other threads which may resident on other nodes
+ * due to layer/data partiton
+ */
+class BridgeDstLayer : public BridgeLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+/**
+ * Connect multiple (src) layers with a single (dst) layer.
+ *
+ * It concates feature Blobs (i.e., matrix) of src layers on one dimension.
+ * The concated feature Blob will be fed into the dst layer.
+ */
+class ConcateLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/**
+ * Connect a single (src) layer with multiple (dst) layers.
+ *
+ * It slices the feature Blob (i.e., matrix) of the src layer on one dimension.
+ * The sliced feature Blobs will be fed into dst layers.
+ */
+class SliceLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  std::vector<Blob<float>> datavec_;
+  std::vector<Blob<float>> gradvec_;
+  int slice_dim_;
+  int slice_num_;
+};
+
+/**
+ * Connect a single (src) layer with multiple dst layers.
+ *
+ * It replicates the feature Blob of the src layer.
+ * Each replicated feature Blob will be fed into one dst layer.
+ * It aggregates gradients set by all dst layers and set it to the src layer.
+ */
+class SplitLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  Blob<float> grads_;
+};
+
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_CONNECTION_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/connection_layer/bridge.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/bridge.h b/include/singa/neuralnet/connection_layer/bridge.h
deleted file mode 100644
index b27693d..0000000
--- a/include/singa/neuralnet/connection_layer/bridge.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_
-#define SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_
-
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "singa/comm/socket.h"
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-
-class BridgeLayer : public ConnectionLayer {
- public:
-  void set_ready(bool a) { ready_ = a; }
-  bool ready() const { return ready_; }
-  // Bind the layer with dealer instance by worker at runtime
-  void MakePaired(Layer* pair, int grp_id, Dealer* dealer,
-                  std::unordered_map<std::string, Layer*>* name2bridge);
-  // Send blobs to other workers due to model partitions
-  void SendBlobs(bool handle_data);
-  // Receive blobs from other workers due to model partitions;
-  void ReceiveBlobs(bool handle_data);
-
- protected:
-  //!< true if received grad from BridgeDstLayer
-  bool ready_ = false;
-  int group_id_ = 0;
-  Layer* pair_ = nullptr;
-  Dealer* dealer_ = nullptr;
-  std::unordered_map<std::string, Layer*>* name2bridge_ = nullptr;
-};
-
-/**
- * For sending data to layer on other threads which may resident on other nodes
- * due to layer/data partition.
- */
-class BridgeSrcLayer : public BridgeLayer {
- public:
-  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-/**
- * For recv data from layer on other threads which may resident on other nodes
- * due to layer/data partiton
- */
-class BridgeDstLayer : public BridgeLayer {
- public:
-  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/connection_layer/concate.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/concate.h b/include/singa/neuralnet/connection_layer/concate.h
deleted file mode 100644
index 5875835..0000000
--- a/include/singa/neuralnet/connection_layer/concate.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_
-#define SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-/**
- * Connect multiple (src) layers with a single (dst) layer.
- *
- * It concates feature Blobs (i.e., matrix) of src layers on one dimension.
- * The concated feature Blob will be fed into the dst layer.
- */
-class ConcateLayer : public ConnectionLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer.h b/include/singa/neuralnet/input_layer.h
new file mode 100644
index 0000000..72593d5
--- /dev/null
+++ b/include/singa/neuralnet/input_layer.h
@@ -0,0 +1,305 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_H_
+#define SINGA_NEURALNET_INPUT_LAYER_H_
+
+#include <string>
+#include <vector>
+#include <thread>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+
+namespace singa {
+
+/**
+ * Base class for loading data from Store.
+ */
+class StoreInputLayer : virtual public InputLayer {
+ public:
+  ~StoreInputLayer();
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+
+ protected:
+  /**
+   * Parsing the (key, val) tuple to get feature (and label).
+   * Subclasses must implment this function.
+   * @param[in] k parse this tuple as the k-th instance of one mini-batch.
+   * @param[in] flag used to guide the parsing, e.g., kDeploy phase should not
+   * parse labels from the tuple.
+   * @param[in] key
+   * @param[in] val
+   */
+  virtual bool Parse(int k, int flag, const string& key, const string& val) = 0;
+
+ protected:
+  int batchsize_ = 1;
+  int random_skip_ = 0;
+  io::Store* store_ = nullptr;
+};
+
+/**
+ * Base layer for parsing a key-value tuple as a feature vector with fixed
+ * length. The feature shape is indicated by users in the configuration.
+ * Each tuple may has a label.
+ */
+class SingleLabelRecordLayer : public StoreInputLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  /**
+   * Load a single record (tuple), e.g., the mean or standard variance vector.
+   */
+  virtual void LoadRecord(const string& backend, const string& path,
+      Blob<float>* to) = 0;
+
+ protected:
+  /**
+   * Feature standardization by processing each feature dimension via
+   * @f$ y = (x - mu)/ std @f$
+   * <a href= "http://ufldl.stanford.edu/wiki/index.php/Data_Preprocessing">
+   * UFLDL</a>
+   */
+  Blob<float> mean_, std_;
+};
+/**
+ * Specific layer that parses the value string loaded by Store as a line from
+ * a CSV file.
+ *
+ * It assumes the first column is the label except that has_label_ is configured
+ * to false. Or the data is used in deploy mode.
+ */
+class CSVInputLayer : public SingleLabelRecordLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+
+ protected:
+  bool Parse(int k, int flag, const string& key, const string& val) override;
+  void LoadRecord(const string& backend,
+                  const string& path,
+                  Blob<float>* to) override;
+
+ private:
+  std::string sep_;
+  bool has_label_;
+};
+
+
+/**
+ * Specific layer that parses the value string loaded by Store into a
+ * RecordProto.
+ */
+class RecordInputLayer : public SingleLabelRecordLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+
+ protected:
+  /**
+   * Parse key as instance ID and val into RecordProto.
+   * @copydetails StoreInputLayer::Parse()
+   */
+  bool Parse(int k, int flag, const string& key, const string& val) override;
+  void LoadRecord(const string& backend,
+                  const string& path,
+                  Blob<float>* to) override;
+
+ private:
+  // TODO(wangwei) decode the image
+  bool encoded_;
+};
+
+/**
+ * Do preprocessing for images, including cropping, mirroring, resizing.
+ */
+class ImagePreprocessLayer : public InputLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers);
+
+ private:
+  bool mirror_ = false;
+  int cropsize_ = 0;
+  int resize_ = 0;
+  float scale_ = 1;
+};
+
+/**
+ * TODO(wangwei) Layer for prefetching data records and parsing them.
+ *
+ * This layer controls the prefetching thread, i.e.,
+ * creating and joining the prefetching thread.
+ */
+class PrefetchLayer : public Layer {
+ public:
+  ~PrefetchLayer();
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
+
+ protected:
+  std::thread thread_;
+};
+
+/****************Deprecated layers******************/
+/**
+ * @deprecated please use the StoreInputLayer.
+ *
+ * Base layer for reading ::Record  from local Shard, HDFS, lmdb, etc.
+ */
+class DataLayer: virtual public InputLayer {
+ public:
+  Blob<float>* mutable_data(const Layer* layer) override { return nullptr; }
+  ConnectionType dst_layer_connection() const override {
+    return kOneToMany;
+  }
+
+  inline int batchsize() const { return batchsize_; }
+  virtual const Record& sample() const {
+    return sample_;
+  }
+  /**
+   * @return the loaded records
+   */
+  virtual const std::vector<Record>& records() const {
+    return records_;
+  }
+
+ protected:
+  int random_skip_;
+  int batchsize_;
+  Record sample_;
+  std::vector<Record> records_;
+};
+/**
+ * @deprecated Please use the subclasses of StoreInputLayer.
+ *
+ * Layer for loading Record from DataShard.
+ */
+class ShardDataLayer : public DataLayer {
+ public:
+  ~ShardDataLayer();
+
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  DataShard* shard_;
+};
+/**
+ * @deprecated please use the subclasses of StoreInputLayer.
+ *
+ * Layer for loading Record from LMDB.
+ */
+#ifdef USE_LMDB
+#include <lmdb.h>
+class LMDBDataLayer : public DataLayer {
+ public:
+  ~LMDBDataLayer();
+
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void OpenLMDB(const std::string& path);
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ConvertCaffeDatumToRecord(const CaffeDatum& datum,
+                                 SingleLabelImageRecord* record);
+
+ private:
+  MDB_env* mdb_env_;
+  MDB_dbi mdb_dbi_;
+  MDB_txn* mdb_txn_;
+  MDB_cursor* mdb_cursor_;
+  MDB_val mdb_key_, mdb_value_;
+};
+#endif
+
+/******************Parser layers***************/
+/**
+ * @deprecated Please use the subclasses of StoreInputLayer which load and parse
+ * data in a single layer.
+ *
+ * Base layer for parsing the input records into Blobs.
+ */
+class ParserLayer : public InputLayer {
+ public:
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
+  ConnectionType dst_layer_connection() const override {
+    return kOneToMany;
+  }
+  /**
+   * Parse records from DataLayer into blob.
+   */
+  virtual void ParseRecords(int flag, const std::vector<Record>& records,
+      Blob<float>* blob) = 0;
+};
+/**
+ *
+ * @deprecated Please use the SingleLabelRecordLayer which parses both feature
+ * and label for each record. Its aux_data() function returns the parsed labels.
+ *
+ * Derived from ParserLayer to parse label in SingaleLabelImageRecord loaded by
+ * ShardDataLayer.
+ */
+class LabelLayer : public ParserLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ParseRecords(int flag, const std::vector<Record>& records,
+                    Blob<float>* blob) override;
+};
+
+/**
+ * @deprecated Please use the subclasses of StoreInputLayer.
+ *
+ * Derived from ParserLayer to parse MNIST feature from SingaleLabelImageRecord.
+ */
+class MnistLayer : public ParserLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ParseRecords(int flag, const std::vector<Record>& records,
+                    Blob<float>* blob) override;
+
+ protected:
+  float norm_a_, norm_b_;
+};
+/**
+ * @deprecated please use the ImagePreprocessLayer which preprocess image
+ * feature from data Blob of source layers.
+ *
+ * Derived from ParserLayer to parse RGB image feature from
+ * SingaleLabelImageRecord.
+ */
+class RGBImageLayer : public ParserLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ParseRecords(int flag, const std::vector<Record>& records,
+                    Blob<float>* blob) override;
+
+ private:
+  float scale_;
+  int cropsize_;
+  bool mirror_;
+  Blob<float> mean_;
+};
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/csv.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/csv.h b/include/singa/neuralnet/input_layer/csv.h
deleted file mode 100644
index 32c0f76..0000000
--- a/include/singa/neuralnet/input_layer/csv.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_CSV_H_
-#define SINGA_NEURALNET_INPUT_LAYER_CSV_H_
-
-#include <string>
-#include <vector>
-#include "singa/neuralnet/input_layer/store.h"
-
-namespace singa {
-
-/**
- * Specific layer that parses the value string loaded by Store as a line from
- * a CSV file.
- *
- * It assumes the first column is the label except that has_label_ is configured
- * to false. Or the data is used in deploy mode.
- */
-class CSVInputLayer : public SingleLabelRecordLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-
- protected:
-  bool Parse(int k, int flag, const string& key, const string& val) override;
-  void LoadRecord(const string& backend,
-                  const string& path,
-                  Blob<float>* to) override;
-
- private:
-  std::string sep_;
-  bool has_label_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_CSV_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/deprecated.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/deprecated.h b/include/singa/neuralnet/input_layer/deprecated.h
deleted file mode 100644
index 2da590b..0000000
--- a/include/singa/neuralnet/input_layer/deprecated.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_DEPRECATED_H_
-#define SINGA_NEURALNET_INPUT_LAYER_DEPRECATED_H_
-
-#include <string>
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/io/kvfile.h"
-
-namespace singa {
-/**
- * @deprecated please use the StoreInputLayer.
- *
- * Base layer for reading ::Record  from local Shard, HDFS, lmdb, etc.
- */
-class DataLayer: virtual public InputLayer {
- public:
-  Blob<float>* mutable_data(const Layer* layer) override { return nullptr; }
-  ConnectionType dst_layer_connection() const override {
-    return kOneToMany;
-  }
-
-  inline int batchsize() const { return batchsize_; }
-  virtual const Record& sample() const {
-    return sample_;
-  }
-  /**
-   * @return the loaded records
-   */
-  virtual const std::vector<Record>& records() const {
-    return records_;
-  }
-
- protected:
-  int random_skip_;
-  int batchsize_;
-  Record sample_;
-  std::vector<Record> records_;
-};
-/**
- * @deprecated Please use the subclasses of StoreInputLayer.
- *
- * Layer for loading Record from DataShard.
- */
-class ShardDataLayer : public DataLayer {
- public:
-  ~ShardDataLayer();
-
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-
- private:
-  DataShard* shard_;
-};
-/**
- * @deprecated please use the subclasses of StoreInputLayer.
- *
- * Layer for loading Record from LMDB.
- */
-#ifdef USE_LMDB
-#include <lmdb.h>
-class LMDBDataLayer : public DataLayer {
- public:
-  ~LMDBDataLayer();
-
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void OpenLMDB(const std::string& path);
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ConvertCaffeDatumToRecord(const CaffeDatum& datum,
-                                 SingleLabelImageRecord* record);
-
- private:
-  MDB_env* mdb_env_;
-  MDB_dbi mdb_dbi_;
-  MDB_txn* mdb_txn_;
-  MDB_cursor* mdb_cursor_;
-  MDB_val mdb_key_, mdb_value_;
-};
-#endif
-
-/******************Parser layers***************/
-/**
- * @deprecated Please use the subclasses of StoreInputLayer which load and parse
- * data in a single layer.
- *
- * Base layer for parsing the input records into Blobs.
- */
-class ParserLayer : public InputLayer {
- public:
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
-  ConnectionType dst_layer_connection() const override {
-    return kOneToMany;
-  }
-  /**
-   * Parse records from DataLayer into blob.
-   */
-  virtual void ParseRecords(int flag, const std::vector<Record>& records,
-      Blob<float>* blob) = 0;
-};
-/**
- *
- * @deprecated Please use the SingleLabelRecordLayer which parses both feature
- * and label for each record. Its aux_data() function returns the parsed labels.
- *
- * Derived from ParserLayer to parse label in SingaleLabelImageRecord loaded by
- * ShardDataLayer.
- */
-class LabelLayer : public ParserLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ParseRecords(int flag, const std::vector<Record>& records,
-                    Blob<float>* blob) override;
-};
-
-/**
- * @deprecated Please use the subclasses of StoreInputLayer.
- *
- * Derived from ParserLayer to parse MNIST feature from SingaleLabelImageRecord.
- */
-class MnistLayer : public ParserLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ParseRecords(int flag, const std::vector<Record>& records,
-                    Blob<float>* blob) override;
-
- protected:
-  float norm_a_, norm_b_;
-};
-/**
- * @deprecated please use the ImagePreprocessLayer which preprocess image
- * feature from data Blob of source layers.
- *
- * Derived from ParserLayer to parse RGB image feature from
- * SingaleLabelImageRecord.
- */
-class RGBImageLayer : public ParserLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ParseRecords(int flag, const std::vector<Record>& records,
-                    Blob<float>* blob) override;
-
- private:
-  float scale_;
-  int cropsize_;
-  bool mirror_;
-  Blob<float> mean_;
-};
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_DEPRECATED_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/image_preprocess.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/image_preprocess.h b/include/singa/neuralnet/input_layer/image_preprocess.h
deleted file mode 100644
index 7f8a3a8..0000000
--- a/include/singa/neuralnet/input_layer/image_preprocess.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_
-#define SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-using std::vector;
-/**
- * Do preprocessing for images, including cropping, mirroring, resizing.
- */
-class ImagePreprocessLayer : public InputLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers);
-
- private:
-  bool mirror_ = false;
-  int cropsize_ = 0;
-  int resize_ = 0;
-  float scale_ = 1;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/prefetch.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/prefetch.h b/include/singa/neuralnet/input_layer/prefetch.h
deleted file mode 100644
index d480618..0000000
--- a/include/singa/neuralnet/input_layer/prefetch.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_
-#define SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_
-
-#include <string>
-#include <vector>
-#include <thread>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-using std::vector;
-/**
- * TODO(wangwei) Layer for prefetching data records and parsing them.
- *
- * This layer controls the prefetching thread, i.e.,
- * creating and joining the prefetching thread.
- */
-class PrefetchLayer : public Layer {
- public:
-  ~PrefetchLayer();
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
-
- protected:
-  std::thread thread_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/record.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/record.h b/include/singa/neuralnet/input_layer/record.h
deleted file mode 100644
index f4571ca..0000000
--- a/include/singa/neuralnet/input_layer/record.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_RECORD_H_
-#define SINGA_NEURALNET_INPUT_LAYER_RECORD_H_
-
-#include <string>
-#include <vector>
-#include "singa/neuralnet/input_layer/store.h"
-
-namespace singa {
-
-/**
- * Specific layer that parses the value string loaded by Store into a
- * RecordProto.
- */
-class RecordInputLayer : public SingleLabelRecordLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-
- protected:
-  /**
-   * Parse key as instance ID and val into RecordProto.
-   * @copydetails StoreInputLayer::Parse()
-   */
-  bool Parse(int k, int flag, const string& key, const string& val) override;
-  void LoadRecord(const string& backend,
-                  const string& path,
-                  Blob<float>* to) override;
-
- private:
-  // TODO(wangwei) decode the image
-  bool encoded_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_RECORD_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/input_layer/store.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/store.h b/include/singa/neuralnet/input_layer/store.h
deleted file mode 100644
index 88e6ca4..0000000
--- a/include/singa/neuralnet/input_layer/store.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_INPUT_LAYER_STORE_H_
-#define SINGA_NEURALNET_INPUT_LAYER_STORE_H_
-
-#include <string>
-#include <vector>
-#include "singa/io/store.h"
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-
-/**
- * Base class for loading data from Store.
- */
-class StoreInputLayer : virtual public InputLayer {
- public:
-  ~StoreInputLayer();
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-
-
- protected:
-  /**
-   * Parsing the (key, val) tuple to get feature (and label).
-   * Subclasses must implment this function.
-   * @param[in] k parse this tuple as the k-th instance of one mini-batch.
-   * @param[in] flag used to guide the parsing, e.g., kDeploy phase should not
-   * parse labels from the tuple.
-   * @param[in] key
-   * @param[in] val
-   */
-  virtual bool Parse(int k, int flag, const string& key, const string& val) = 0;
-
- protected:
-  int batchsize_ = 1;
-  int random_skip_ = 0;
-  io::Store* store_ = nullptr;
-};
-
-/**
- * Base layer for parsing a key-value tuple as a feature vector with fixed
- * length. The feature shape is indicated by users in the configuration.
- * Each tuple may has a label.
- */
-class SingleLabelRecordLayer : public StoreInputLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-
- protected:
-  /**
-   * Load a single record (tuple), e.g., the mean or standard variance vector.
-   */
-  virtual void LoadRecord(const string& backend, const string& path,
-      Blob<float>* to) = 0;
-
- protected:
-  /**
-   * Feature standardization by processing each feature dimension via
-   * @f$ y = (x - mu)/ std @f$
-   * <a href= "http://ufldl.stanford.edu/wiki/index.php/Data_Preprocessing">
-   * UFLDL</a>
-   */
-  Blob<float> mean_, std_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_INPUT_LAYER_STORE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/layer.h b/include/singa/neuralnet/layer.h
index 7a92ced..5e2692e 100644
--- a/include/singa/neuralnet/layer.h
+++ b/include/singa/neuralnet/layer.h
@@ -22,9 +22,7 @@
 #ifndef SINGA_NEURALNET_LAYER_H_
 #define SINGA_NEURALNET_LAYER_H_
 
-#include <map>
 #include <string>
-#include <thread>
 #include <vector>
 #include "singa/proto/common.pb.h"
 #include "singa/proto/job.pb.h"
@@ -267,7 +265,7 @@ class Layer {
   vector<AuxType> aux_data_;
   vector<Blob<float>*> datavec_, gradvec_;
 };
-
+/**************** Layer categories *****************/
 /**
  * Base layer for connecting layers when neural net is partitioned.
  */
@@ -275,6 +273,7 @@ class ConnectionLayer : virtual public Layer {
   // defined as a layer category
 };
 
+
 /**
  * Base layer for getting input data. May include layers for loading records,
  * parsing records.
@@ -295,6 +294,14 @@ class InputLayer : virtual public Layer {
 
 using SingleLabelImageRecord = RecordProto;
 
+/**
+ * Base layer for feature transformation, e.g., ConvolutionLayer, PoolingLayer,
+ * etc.
+ */
+class NeuronLayer : virtual public Layer {
+  // defined as a layer category
+};
+
 
 /**
  * Base layer for calculating loss and doing BackPropagation.
@@ -315,14 +322,6 @@ class LossLayer : virtual public Layer {
 };
 
 /**
- * Base layer for feature transformation, e.g., ConvolutionLayer, PoolingLayer,
- * etc.
- */
-class NeuronLayer : virtual public Layer {
-  // defined as a layer category
-};
-
-/**
  * Base layer for collecting features into disk file, HTTP stream, etc.
  */
 class OutputLayer : virtual public Layer {
@@ -338,5 +337,6 @@ class OutputLayer : virtual public Layer {
   }
 };
 
+
 }  // namespace singa
 #endif  // SINGA_NEURALNET_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/loss_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/loss_layer.h b/include/singa/neuralnet/loss_layer.h
new file mode 100644
index 0000000..e7fcd74
--- /dev/null
+++ b/include/singa/neuralnet/loss_layer.h
@@ -0,0 +1,56 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_LOSS_LAYER_H_
+#define SINGA_NEURALNET_LOSS_LAYER_H_
+
+#include "singa/neuralnet/layer.h"
+#include "singa/neuralnet/neuron_layer.h"
+namespace singa {
+using std::vector;
+/**
+ * Squared Euclidean loss as @f$0.5 ||p - t||^2@f$, where p is prediction
+ * result, t is the ground truth.
+ */
+class EuclideanLossLayer : public LossLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+/**
+ * Cross-entropy loss applied to the probabilities computed from Softmax.
+ * @f$ L_i = -log P_{t_i}, t_i\in [0, C] @f$ is the label for the i-th object,
+ * C is the total number of classes.
+ */
+class SoftmaxLossLayer : public LossLayer, public SoftmaxLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  float scale_;
+  int topk_;
+};
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_LOSS_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/loss_layer/euclidean.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/loss_layer/euclidean.h b/include/singa/neuralnet/loss_layer/euclidean.h
deleted file mode 100644
index 4d50dc5..0000000
--- a/include/singa/neuralnet/loss_layer/euclidean.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_LOSS_LAYER_EUCLIDEAN_H_
-#define SINGA_NEURALNET_LOSS_LAYER_EUCLIDEAN_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-using std::vector;
-/**
- * Squared Euclidean loss as @f$0.5 ||p - t||^2@f$, where p is prediction
- * result, t is the ground truth.
- */
-class EuclideanLossLayer : public LossLayer {
- public:
-  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_LOSS_LAYER_EUCLIDEAN_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/loss_layer/softmax.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/loss_layer/softmax.h b/include/singa/neuralnet/loss_layer/softmax.h
deleted file mode 100644
index c497377..0000000
--- a/include/singa/neuralnet/loss_layer/softmax.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_LOSS_LAYER_SOFTMAX_H_
-#define SINGA_NEURALNET_LOSS_LAYER_SOFTMAX_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-using std::vector;
-/**
- * Cross-entropy loss applied to the probabilities computed from Softmax.
- * @f$ L_i = -log P_{t_i}, t_i\in [0, C] @f$ is the label for the i-th object,
- * C is the total number of classes.
- */
-class SoftmaxLossLayer : public LossLayer {
- public:
-  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
-  /**
-   * This layer is not recommendeded for partition because it requires the whole
-   * src layer for normalization.
-   */
-  ConnectionType src_neuron_connection(int k) const override {
-    // CHECK_LT(k, srclayers_.size());
-    return kOneToAll;
-  }
-
- private:
-  int batchsize_;
-  int dim_;
-  float scale_;
-  int topk_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_LOSS_LAYER_SOFTMAX_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer.h b/include/singa/neuralnet/neuron_layer.h
new file mode 100644
index 0000000..2bf9682
--- /dev/null
+++ b/include/singa/neuralnet/neuron_layer.h
@@ -0,0 +1,410 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_NEURON_LAYER_H_
+#define SINGA_NEURALNET_NEURON_LAYER_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+#include "singa/proto/job.pb.h"
+namespace singa {
+
+/** Activation layer applies one of the following activations,
+ * - "relu",    @f$ f(x) = max(0, x)@f$
+ * - "sigmoid", @f$ f(x)=1/(1+exp(-x)) @f$
+ * - "tanh",    @f$ f(x) = tanh(x) @f$
+ * - "stanh",   scaled tanh @f$f(x)=1.7159047 * tanh(0.66666667 * x)@f$, valid
+ *   only for CPU training.
+ * It may share data and grad with its (single) source layer depending on
+ * the share_srclayer_blob configuration field.
+ */
+class ActivationLayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  bool share_with_srclayer = false;
+  std::string method_;
+};
+
+/**
+ * Convolution layer.
+ * Currently using Mshadow to do convolution operations. TODO(wangwei) remove
+ * dependency on Mshadow and using im2col from Caffe to implement this for CPU
+ * version. For GPU version, there is class CudnnConvLayer.
+ */
+class ConvolutionLayer : public NeuronLayer {
+ public:
+  ~ConvolutionLayer();
+
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+  const std::vector<Param*> GetParams() const override {
+    std::vector<Param*> params{weight_, bias_};
+    return params;
+  }
+  ConnectionType src_neuron_connection(int k) const  override {
+    // CHECK_LT(k, srclayers_.size());
+    return kOneToAll;
+  }
+
+ protected:
+  int kernel_x_, pad_x_,  stride_x_;
+  int kernel_y_, pad_y_,  stride_y_;
+  int batchsize_,  channels_, height_, width_;
+  int col_height_, col_width_, conv_height_, conv_width_, num_filters_;
+  Param* weight_ = nullptr, *bias_ = nullptr;
+  Blob<float> col_data_, col_grad_;
+};
+
+/**
+ * Implement convolution operations using im2col from Caffe.
+ */
+class CConvolutionLayer : public ConvolutionLayer {
+ public:
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/**
+ * Layer that drops out some neurons randomly according to a user defined drop
+ * ratio (default is 0.5). It helps reduce overfitting.
+ */
+class DropoutLayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+ protected:
+  // drop probability
+  float pdrop_;
+  /* record which neuron is dropped, required for back propagating gradients,
+   * if mask[i]=0, then the i-th neuron is dropped.
+   */
+  Blob<float> mask_;
+};
+
+/**
+ * Layer that applies linear transformations as
+ * @f$ h = v*W+b @f$, where W and b are weight matrix and bias vector.
+ */
+class InnerProductLayer : public NeuronLayer {
+ public:
+  ~InnerProductLayer();
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+  const std::vector<Param*> GetParams() const override {
+    std::vector<Param*> params{weight_, bias_};
+    return params;
+  }
+
+ private:
+  int batchsize_;
+  int vdim_, hdim_;
+  bool transpose_;
+  Param *weight_, *bias_;
+};
+
+/**
+ * Local Response Normalization edge
+ *
+ * @f$ b_i=a_i/x_i^{\beta} @f$
+ * @f$x_i=knorm+alpha*\sum_{j=max(0,i-n/2)}^{min(N,i+n/2)}(a_j)^2 @f$
+ * n is size of local response area.
+ * @f$a_i@f$, the activation (after ReLU) of a neuron convolved with the i-th kernel.
+ * @f$b_i@f$, the neuron after normalization, N is the total num of kernels
+ */
+class LRNLayer : public NeuronLayer {
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  //!< shape of the feature blob of the src layer
+  int batchsize_, channels_, height_, width_;
+  //!< size local response (neighbor) area
+  int lsize_;
+  //!< hyper-parameter
+  float alpha_, beta_, knorm_;
+  Blob<float> norm_;
+};
+
+/**
+ * Layer that applies the pooling operation.
+ * TODO(wangwei) remove dependenices on mshadow
+ */
+class PoolingLayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  int kernel_x_, pad_x_, stride_x_;
+  int kernel_y_, pad_y_, stride_y_;
+  int batchsize_, channels_, height_, width_, pooled_height_, pooled_width_;
+  PoolingProto_PoolMethod pool_;
+};
+/**
+ * Use book-keeping for BP following Caffe's pooling implementation
+ */
+class CPoolingLayer : public PoolingLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers);
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  Blob<float> mask_;
+};
+
+/**
+ * @deprecated {please use ActivationLayer}
+ */
+class ReLULayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/**
+ * Softmax layer applies softmax transformation to features from source layers.
+ * The feature blob of this layer is of shape (batchsize,
+ * num_softmax_per_instance, count_per_softmax), where num_softmax_per_instance
+ * is controled by users (default is 1),
+ * @f$ count_per_softmax = count / batchsize / num_softmax_per_instance @f$.
+ * The softmax is conducted over count_per_softmax elements each time.
+  */
+class SoftmaxLayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+  /**
+   * This layer is not recommended for partition because it requires the whole
+   * src layer for normalization.
+   */
+  ConnectionType src_neuron_connection(int k) const override {
+    // CHECK_LT(k, srclayers_.size());
+    return kOneToAll;
+  }
+ protected:
+  int batchsize_;
+  //!< set by users (default is 1)
+  int num_softmax_per_instance_;
+  //!< size of the softmax area/length
+  int count_per_softmax_;
+};
+/**
+ * @deprecated {please use ActivationLayer}
+ *
+ * This layer applies the Sigmoid function to neuron activations.
+ * f(x)=1/(1+exp(-x))
+ * f'(x)=f(x)*(1-f(x))
+ */
+class SigmoidLayer: public Layer {
+ public:
+  using Layer::ComputeFeature;
+  using Layer::ComputeGradient;
+
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/**
+ * @deprecated {please use ActivationLayer}
+ * This layer applies the scaled Tanh function to neuron activations.
+ * f(x)=1.7159047  tanh(0.66666667 x)
+ */
+class STanhLayer : public NeuronLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+/*************** Layers implemented using cudnn v3 ***************/
+#ifdef USE_CUDNN
+#define CHECK_CUDNN(x) CHECK_EQ(x, CUDNN_STATUS_SUCCESS)
+
+class CudnnLayer : virtual public NeuronLayer {
+ public:
+  ~CudnnLayer() {
+    CHECK_CUDNN(cudnnDestroyTensorDescriptor(src_desc_));
+    CHECK_CUDNN(cudnnDestroyTensorDescriptor(my_desc_));
+    CHECK_CUDNN(cudnnDestroy(handle_));
+  }
+  void virtual InitCudnn() {
+    CHECK(!has_init_cudnn_);
+    CHECK_CUDNN(cudnnCreate(&handle_));
+    has_init_cudnn_ = true;
+  }
+ protected:
+  bool has_init_cudnn_ = false;
+  cudnnHandle_t handle_;
+  cudnnTensorDescriptor_t src_desc_, my_desc_;
+};
+
+/**
+ * Activation layer implemented using cudnn v3.
+ * Activation methods including
+ * - "sigmoid"
+ * - "tanh"
+ * - "relu"
+ */
+class CudnnActivationLayer : public ActivationLayer, public CudnnLayer {
+ public:
+  void InitCudnn() override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  cudnnActivationMode_t mode_;
+};
+
+/**
+ * Convolution layer implemented using cudnn (v3 version backward functions).
+ */
+class CuDNNConvLayer : public ConvolutionLayer, public CudnnLayer {
+ public:
+  ~CuDNNConvLayer();
+  void InitCudnn() override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  cudnnTensorDescriptor_t bias_desc_;
+  cudnnFilterDescriptor_t filter_desc_;
+  cudnnConvolutionDescriptor_t conv_desc_;
+  cudnnConvolutionFwdAlgo_t fp_alg_;
+  cudnnConvolutionBwdFilterAlgo_t bp_filter_alg_;
+  cudnnConvolutionBwdDataAlgo_t bp_data_alg_;
+  size_t workspace_byte_limit_, workspace_count_;
+};
+
+class CudnnLRNLayer : public LRNLayer, public CudnnLayer {
+ public:
+  ~CudnnLRNLayer();
+  void InitCudnn() override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  cudnnLRNMode_t mode_;
+  cudnnLRNDescriptor_t norm_desc_;
+};
+/**
+ * Pooling layer implemented using cudnn.
+ */
+class CuDNNPoolLayer : public PoolingLayer, public CudnnLayer {
+ public:
+  ~CuDNNPoolLayer();
+  void InitCudnn() override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  cudnnPoolingDescriptor_t pool_desc_;
+};
+
+/**
+ * Cudnn Softmax layer.
+ */
+class CudnnSoftmaxLayer : public SoftmaxLayer, public CudnnLayer {
+ public:
+  void InitCudnn() override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+#endif  // USE_CUDNN
+
+/******************** RBM layers *****************/
+/**
+ * Base layer for RBM models.
+ */
+class RBMLayer: virtual public NeuronLayer {
+ public:
+  virtual ~RBMLayer() {}
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  const Blob<float>& neg_data(const Layer* layer) {
+    return neg_data_;
+  }
+  Blob<float>* mutable_neg_data(const Layer* layer) {
+    return &neg_data_;
+  }
+  const std::vector<Param*> GetParams() const override {
+    std::vector<Param*> params{weight_, bias_};
+    return params;
+  }
+  virtual Blob<float>* Sample(int flag);
+
+ protected:
+  //! if true, sampling according to gaussian distribution
+  bool gaussian_;
+  //! dimension of the hidden layer
+  int hdim_;
+  //! dimension of the visible layer
+  int vdim_;
+  int batchsize_;
+  bool first_gibbs_;
+  Param* weight_, *bias_;
+
+  Blob<float> neg_data_;
+  Blob<float> neg_sample_;
+  Blob<float> sample_;
+};
+
+/**
+ * RBM visible layer
+ */
+class RBMVisLayer: public RBMLayer, public LossLayer {
+ public:
+  ~RBMVisLayer();
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  RBMLayer* hid_layer_;
+  Layer* input_layer_;
+};
+/**
+ * RBM hidden layer
+ */
+class RBMHidLayer: public RBMLayer {
+ public:
+  ~RBMHidLayer();
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  RBMLayer *vis_layer_;
+};
+
+}  // namespace singa
+#endif  // SINGA_NEURALNET_NEURON_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/argsort.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/argsort.h b/include/singa/neuralnet/neuron_layer/argsort.h
deleted file mode 100644
index 1ab1d4b..0000000
--- a/include/singa/neuralnet/neuron_layer/argsort.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_ARGSORT_H_
-#define SINGA_NEURALNET_NEURON_LAYER_ARGSORT_H_
-
-#include <glog/logging.h>
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-namespace singa {
-
-/**
- * ArgSort layer used to get topk prediction labels.
- *
- * It sort the labels based on its score (e.g., probability) from large to
- * small. Topk labels will be kepted in the data field. It should not be called
- * during training because this layer does not implement ComputeGradient()
- * function.
- */
-class ArgSortLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) {
-    LOG(FATAL) << "Not Implemented";
-  }
-
- private:
-  int batchsize_, dim_;
-  int topk_;
-};
-}  // namespace singa
-#endif  // SINGA_NEURALNET_NEURON_LAYER_ARGSORT_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/convolution.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/convolution.h b/include/singa/neuralnet/neuron_layer/convolution.h
deleted file mode 100644
index 2ffbf46..0000000
--- a/include/singa/neuralnet/neuron_layer/convolution.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_CONVOLUTION_H_
-#define SINGA_NEURALNET_NEURON_LAYER_CONVOLUTION_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-
-namespace singa {
-/**
- * Convolution layer.
- */
-class ConvolutionLayer : public NeuronLayer {
- public:
-  ~ConvolutionLayer();
-
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
-  ConnectionType src_neuron_connection(int k) const  override {
-    // CHECK_LT(k, srclayers_.size());
-    return kOneToAll;
-  }
-
- protected:
-  int kernel_, pad_,  stride_;
-  int batchsize_,  channels_, height_, width_;
-  int col_height_, col_width_, conv_height_, conv_width_, num_filters_;
-  Param* weight_, *bias_;
-  Blob<float> col_data_, col_grad_;
-};
-
-/**
- * Use im2col from Caffe
- */
-class CConvolutionLayer : public ConvolutionLayer {
- public:
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_CONVOLUTION_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/dropout.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/dropout.h b/include/singa/neuralnet/neuron_layer/dropout.h
deleted file mode 100644
index 8c1883c..0000000
--- a/include/singa/neuralnet/neuron_layer/dropout.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_DROPOUT_H_
-#define SINGA_NEURALNET_NEURON_LAYER_DROPOUT_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-
-namespace singa {
-
-class DropoutLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
- protected:
-  // drop probability
-  float pdrop_;
-  /* record which neuron is dropped, required for back propagating gradients,
-   * if mask[i]=0, then the i-th neuron is dropped.
-   */
-  Blob<float> mask_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_DROPOUT_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/inner_product.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/inner_product.h b/include/singa/neuralnet/neuron_layer/inner_product.h
deleted file mode 100644
index bf3fdee..0000000
--- a/include/singa/neuralnet/neuron_layer/inner_product.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_INNER_PRODUCT_H_
-#define SINGA_NEURALNET_NEURON_LAYER_INNER_PRODUCT_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-namespace singa {
-
-class InnerProductLayer : public NeuronLayer {
- public:
-  ~InnerProductLayer();
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
-
- private:
-  int batchsize_;
-  int vdim_, hdim_;
-  bool transpose_;
-  Param *weight_, *bias_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_INNER_PRODUCT_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/lrn.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/lrn.h b/include/singa/neuralnet/neuron_layer/lrn.h
deleted file mode 100644
index 42b10ed..0000000
--- a/include/singa/neuralnet/neuron_layer/lrn.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_LRN_H_
-#define SINGA_NEURALNET_NEURON_LAYER_LRN_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-
-namespace singa {
-/**
- * Local Response Normalization edge
- *
- * b_i=a_i/x_i^beta
- * x_i=knorm+alpha*\sum_{j=max(0,i-n/2}^{min(N,i+n/2}(a_j)^2
- * n is size of local response area.
- * a_i, the activation (after ReLU) of a neuron convolved with the i-th kernel.
- * b_i, the neuron after normalization, N is the total num of kernels
- */
-class LRNLayer : public NeuronLayer {
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- protected:
-  //! shape of the bottom layer feature
-  int batchsize_, channels_, height_, width_;
-  //! size local response (neighbor) area
-  int lsize_;
-  //! hyper-parameter
-  float alpha_, beta_, knorm_;
-  Blob<float> norm_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_LRN_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/pooling.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/pooling.h b/include/singa/neuralnet/neuron_layer/pooling.h
deleted file mode 100644
index a66a4ef..0000000
--- a/include/singa/neuralnet/neuron_layer/pooling.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_POOLING_H_
-#define SINGA_NEURALNET_NEURON_LAYER_POOLING_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-
-namespace singa {
-
-class PoolingLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- protected:
-  int kernel_, pad_, stride_;
-  int batchsize_, channels_, height_, width_, pooled_height_, pooled_width_;
-  PoolingProto_PoolMethod pool_;
-};
-/**
- * Use book-keeping for BP following Caffe's pooling implementation
- */
-class CPoolingLayer : public PoolingLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers);
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
- private:
-  Blob<float> mask_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_POOLING_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/relu.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/relu.h b/include/singa/neuralnet/neuron_layer/relu.h
deleted file mode 100644
index 48a40a8..0000000
--- a/include/singa/neuralnet/neuron_layer/relu.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_RELU_H_
-#define SINGA_NEURALNET_NEURON_LAYER_RELU_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-namespace singa {
-
-class ReLULayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_RELU_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/softmax.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/softmax.h b/include/singa/neuralnet/neuron_layer/softmax.h
deleted file mode 100644
index cffae21..0000000
--- a/include/singa/neuralnet/neuron_layer/softmax.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_SOFTMAX_H_
-#define SINGA_NEURALNET_NEURON_LAYER_SOFTMAX_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-namespace singa {
-
-/**
- * Softmax layer.
- */
-class SoftmaxLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-}  // namespace singa
-#endif  // SINGA_NEURALNET_NEURON_LAYER_SOFTMAX_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/neuron_layer/stanh.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer/stanh.h b/include/singa/neuralnet/neuron_layer/stanh.h
deleted file mode 100644
index 08edb7d..0000000
--- a/include/singa/neuralnet/neuron_layer/stanh.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_STANH_H_
-#define SINGA_NEURALNET_NEURON_LAYER_STANH_H_
-
-#include <vector>
-#include "singa/neuralnet/layer.h"
-#include "singa/proto/job.pb.h"
-
-namespace singa {
-/**
- * This layer apply scaled Tanh function to neuron activations.
- * f(x)=1.7159047  tanh(0.66666667 x)
- */
-class STanhLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_STANH_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af1bf509/include/singa/neuralnet/output_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/output_layer.h b/include/singa/neuralnet/output_layer.h
new file mode 100644
index 0000000..a7d92d7
--- /dev/null
+++ b/include/singa/neuralnet/output_layer.h
@@ -0,0 +1,72 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_OUTPUT_LAYER_H_
+#define SINGA_NEURALNET_OUTPUT_LAYER_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+#include "singa/io/store.h"
+
+namespace singa {
+/**
+ * ArgSort layer used to get topk prediction labels.
+ *
+ * It sort the labels based on its score (e.g., probability) from large to
+ * small. Topk labels will be kepted in the data field. It should not be called
+ * during training because this layer does not implement ComputeGradient()
+ * function.
+ */
+class ArgSortLayer : public OutputLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  int batchsize_, dim_;
+  int topk_;
+};
+/**
+ * Output data (and label) for its source layer.
+ */
+class CSVOutputLayer : public OutputLayer {
+ public:
+  ~CSVOutputLayer() { delete store_; }
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  int inst_ = 0;
+  io::Store* store_ = nullptr;
+};
+
+class RecordOutputLayer : public OutputLayer {
+ public:
+  ~RecordOutputLayer() { delete store_; }
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  int inst_ = 0;  //!< instance No.
+  io::Store* store_ = nullptr;
+};
+}  // namespace singa
+#endif  // SINGA_NEURALNET_OUTPUT_LAYER_H_



Mime
View raw message