singa-commits mailing list archives

From wang...@apache.org
Subject incubator-singa git commit: SINGA-223 Update the documentation
Date Mon, 22 Jan 2018 05:25:43 GMT
Repository: incubator-singa
Updated Branches:
  refs/heads/master 782735192 -> 7a641bf87


SINGA-223 Update the documentation


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/7a641bf8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/7a641bf8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/7a641bf8

Branch: refs/heads/master
Commit: 7a641bf8710cf1b2b252d4eccdd0bc0ef45000a7
Parents: 7827351
Author: Moaz Reyad <moazreyad@gmail.com>
Authored: Tue Jan 16 13:03:31 2018 +0800
Committer: Moaz Reyad <moazreyad@gmail.com>
Committed: Thu Jan 18 20:21:51 2018 +0800

----------------------------------------------------------------------
 doc/conf.py                          |   2 +-
 doc/en/docs/install_macos1013.rst    | 135 +++++++++++++++
 doc/en/docs/installation.md          |   2 +-
 examples/cifar10/alexnet-parallel.cc | 265 ------------------------------
 examples/cifar10/alexnet.cc          | 207 -----------------------
 examples/cifar10/alexnet.py          |  63 -------
 examples/cifar10/cnn-parallel.cc     | 265 ++++++++++++++++++++++++++++++
 examples/cifar10/cnn.cc              | 207 +++++++++++++++++++++++
 examples/cifar10/cnn.py              |  63 +++++++
 python/singa/net.py                  |   9 +
 10 files changed, 681 insertions(+), 537 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/doc/conf.py
----------------------------------------------------------------------
diff --git a/doc/conf.py b/doc/conf.py
index 280fae6..ef043d4 100755
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -71,7 +71,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'incubator-singa'
-copyright = u'2017 The Apache Software Foundation. All rights reserved. Apache SINGA, Apache, the Apache feather logo, and the Apache SINGA project logos are trademarks of The Apache Software Foundation. All other marks mentioned may be trademarks or registered trademarks of their respective owners.'
+copyright = u'2018 The Apache Software Foundation. All rights reserved. Apache SINGA, Apache, the Apache feather logo, and the Apache SINGA project logos are trademarks of The Apache Software Foundation. All other marks mentioned may be trademarks or registered trademarks of their respective owners.'
 author = u'moaz'
 
 # The version info for the project you're documenting, acts as replacement for

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/doc/en/docs/install_macos1013.rst
----------------------------------------------------------------------
diff --git a/doc/en/docs/install_macos1013.rst b/doc/en/docs/install_macos1013.rst
new file mode 100644
index 0000000..457b83d
--- /dev/null
+++ b/doc/en/docs/install_macos1013.rst
@@ -0,0 +1,135 @@
+Installing SINGA on macOS 10.13
+===============================
+
+Requirements
+------------
+
+* Homebrew is used to install the requirements. Try:
+
+.. code-block:: bash
+
+	brew update
+
+If you don't have Homebrew installed, or if you upgraded from a previous operating system, you may see an error message; see the FAQ below.
+
+* Install the required software for building SINGA:
+
+.. code-block:: bash
+
+	brew tap homebrew/science
+	brew tap homebrew/python
+
+	brew install openblas
+	brew install protobuf
+	brew install swig
+
+	brew install git
+	brew install cmake
+
+	brew install python
+	brew install opencv
+	brew install glog lmdb
+
+These packages are needed only if the USE_MODULES option of cmake is used:
+
+.. code-block:: bash
+
+	brew install automake
+	brew install wget
+
+* Prepare the compiler:
+
+To let the compiler (and cmake) know the openblas
+path,
+
+.. code-block:: bash
+
+	export CMAKE_INCLUDE_PATH=/usr/local/opt/openblas/include:$CMAKE_INCLUDE_PATH
+	export CMAKE_LIBRARY_PATH=/usr/local/opt/openblas/lib:$CMAKE_LIBRARY_PATH
+
+To let the runtime know the openblas path,
+
+.. code-block:: bash
+
+	export LD_LIBRARY_PATH=/usr/local/opt/openblas/lib:$LD_LIBRARY_PATH
+
+Add the numpy header path to the compiler flags, for example:
+
+.. code-block:: bash
+
+	export CXXFLAGS="-I /usr/local/lib/python2.7/site-packages/numpy/core/include $CXXFLAGS"
+
+* Get the source code and build it:
+
+.. code-block:: bash
+
+	git clone https://github.com/apache/incubator-singa.git
+
+	cd incubator-singa
+	mkdir build
+	cd build
+
+	cmake ..
+	make
+
+* Optional: create a virtual environment:
+
+.. code-block:: bash
+
+	virtualenv ~/venv
+	source ~/venv/bin/activate
+
+* Install the Python module:
+
+.. code-block:: bash
+
+	cd python
+	pip install .
+
+If there is no error message from
+
+.. code-block:: bash
+
+    python -c "from singa import tensor"
+
+then SINGA is installed successfully.
+
+* Run the Jupyter notebook:
+
+.. code-block:: bash
+
+	pip install matplotlib
+
+	cd ../../doc/en/docs/notebook
+	jupyter notebook
+
+Video Tutorial
+--------------
+
+See these steps in the following video:
+
+.. |video| image:: https://img.youtube.com/vi/T8xGTH9vCBs/0.jpg
+   :scale: 100%
+   :align: middle
+   :target: https://www.youtube.com/watch?v=T8xGTH9vCBs
+
++---------+
+| |video| |
++---------+
+
+FAQ
+---
+
+* How to install or update homebrew:
+
+.. code-block:: bash
+
+	/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+
+* How to fix an error with protobuf:
+
+Try overwriting the links:
+
+.. code-block:: bash
+
+	brew link --overwrite protobuf
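
A note on the CXXFLAGS step above: instead of hard-coding the Python 2.7 site-packages path, the numpy header directory can be located programmatically (a minimal sketch, assuming numpy is importable in the active environment):

    # Print the numpy header directory that CXXFLAGS should point at.
    import numpy
    print(numpy.get_include())

The printed path can then be substituted into the export CXXFLAGS line above.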

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/doc/en/docs/installation.md
----------------------------------------------------------------------
diff --git a/doc/en/docs/installation.md b/doc/en/docs/installation.md
index 94d4e3e..0d0a33e 100755
--- a/doc/en/docs/installation.md
+++ b/doc/en/docs/installation.md
@@ -136,7 +136,7 @@ The following instructions are tested on Ubuntu 14.04 and 16.04 for installing d
     $ sudo apt-get install python3-dev python3-numpy, python3-pip
     $ sudo apt-get install libopencv-dev libgoogle-glog-dev liblmdb-dev
 
-The following instructions are tested on Mac OS X Yosemite (10.11 and 10.12) for installing dependent libraries.
+The following instructions are tested on Mac OS X Yosemite (10.11 and 10.12) for installing dependent libraries. Instructions for installing on macOS 10.13 (High Sierra) can be found [here](install_macos1013.html).
 
     # required libraries
     $ brew tap homebrew/science

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/alexnet-parallel.cc
----------------------------------------------------------------------
diff --git a/examples/cifar10/alexnet-parallel.cc b/examples/cifar10/alexnet-parallel.cc
deleted file mode 100644
index 8cc3352..0000000
--- a/examples/cifar10/alexnet-parallel.cc
+++ /dev/null
@@ -1,265 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "cifar10.h"
-#include "singa/model/feed_forward_net.h"
-#include "singa/model/optimizer.h"
-#include "singa/model/updater.h"
-#include "singa/model/initializer.h"
-#include "singa/model/metric.h"
-#include "singa/utils/channel.h"
-#include "singa/utils/string.h"
-#include "singa/core/memory.h"
-#include <thread>
-#include <memory>
-
-namespace singa {
-const std::string engine = "cudnn";
-
-LayerConf GenConvConf(string name, int nb_filter, int kernel, int stride,
-                      int pad, float std) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_convolution");
-  ConvolutionConf *conv = conf.mutable_convolution_conf();
-  conv->set_num_output(nb_filter);
-  conv->add_kernel_size(kernel);
-  conv->add_stride(stride);
-  conv->add_pad(pad);
-  conv->set_bias_term(true);
-
-  ParamSpec *wspec = conf.add_param();
-  wspec->set_name(name + "_weight");
-  auto wfill = wspec->mutable_filler();
-  wfill->set_type("Gaussian");
-  wfill->set_std(std);
-
-  ParamSpec *bspec = conf.add_param();
-  bspec->set_name(name + "_bias");
-  bspec->set_lr_mult(2);
-  //  bspec->set_decay_mult(0);
-  return conf;
-}
-
-LayerConf GenPoolingConf(string name, bool max_pool, int kernel, int stride,
-                         int pad) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_pooling");
-  PoolingConf *pool = conf.mutable_pooling_conf();
-  pool->set_kernel_size(kernel);
-  pool->set_stride(stride);
-  pool->set_pad(pad);
-  if (!max_pool) pool->set_pool(PoolingConf_PoolMethod_AVE);
-  return conf;
-}
-
-LayerConf GenReLUConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_relu");
-  return conf;
-}
-
-LayerConf GenDenseConf(string name, int num_output, float std, float wd) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type("singa_dense");
-  DenseConf *dense = conf.mutable_dense_conf();
-  dense->set_num_output(num_output);
-
-  ParamSpec *wspec = conf.add_param();
-  wspec->set_name(name + "_weight");
-  wspec->set_decay_mult(wd);
-  auto wfill = wspec->mutable_filler();
-  wfill->set_type("Gaussian");
-  wfill->set_std(std);
-
-  ParamSpec *bspec = conf.add_param();
-  bspec->set_name(name + "_bias");
-  bspec->set_lr_mult(2);
-  bspec->set_decay_mult(0);
-
-  return conf;
-}
-
-LayerConf GenLRNConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_lrn");
-  LRNConf *lrn = conf.mutable_lrn_conf();
-  lrn->set_local_size(3);
-  lrn->set_alpha(5e-05);
-  lrn->set_beta(0.75);
-  return conf;
-}
-
-LayerConf GenFlattenConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type("singa_flatten");
-  return conf;
-}
-
-FeedForwardNet CreateNet() {
-  FeedForwardNet net;
-  Shape s{3, 32, 32};
-
-  net.Add(GenConvConf("conv1", 32, 5, 1, 2, 0.0001), &s);
-  net.Add(GenPoolingConf("pool1", true, 3, 2, 1));
-  net.Add(GenReLUConf("relu1"));
-  net.Add(GenLRNConf("lrn1"));
-  net.Add(GenConvConf("conv2", 32, 5, 1, 2, 0.01));
-  net.Add(GenReLUConf("relu2"));
-  net.Add(GenPoolingConf("pool2", false, 3, 2, 1));
-  net.Add(GenLRNConf("lrn2"));
-  net.Add(GenConvConf("conv3", 64, 5, 1, 2, 0.01));
-  net.Add(GenReLUConf("relu3"));
-  net.Add(GenPoolingConf("pool3", false, 3, 2, 1));
-  net.Add(GenFlattenConf("flat"));
-  net.Add(GenDenseConf("ip", 10, 0.01, 250));
-  return net;
-}
-
-void Train(float lr, int num_epoch, string data_dir) {
-  Cifar10 data(data_dir);
-  Tensor train_x, train_y, test_x, test_y;
-  Tensor train_x_1, train_x_2, train_y_1, train_y_2;
-  {
-    auto train = data.ReadTrainData();
-    size_t nsamples = train.first.shape(0);
-    auto mtrain =
-        Reshape(train.first, Shape{nsamples, train.first.Size() / nsamples});
-    const Tensor &mean = Average(mtrain, 0);
-    SubRow(mean, &mtrain);
-    train_x = Reshape(mtrain, train.first.shape());
-    train_y = train.second;
-
-    LOG(INFO) << "Slicing training data...";
-    train_x_1.Reshape(Shape{nsamples / 2, train.first.shape(1),
-        train.first.shape(2), train.first.shape(3)});
-    LOG(INFO) << "Copying first data slice...";
-    CopyDataToFrom(&train_x_1, train_x, train_x.Size() / 2);
-    train_x_2.Reshape(Shape{nsamples / 2, train.first.shape(1),
-        train.first.shape(2), train.first.shape(3)});
-    LOG(INFO) << "Copying second data slice...";
-    CopyDataToFrom(&train_x_2, train_x, train_x.Size() / 2, 0,
-                   train_x.Size() / 2);
-    train_y_1.Reshape(Shape{nsamples / 2});
-    train_y_1.AsType(kInt);
-    LOG(INFO) << "Copying first label slice...";
-    CopyDataToFrom(&train_y_1, train_y, train_y.Size() / 2);
-    train_y_2.Reshape(Shape{nsamples / 2});
-    train_y_2.AsType(kInt);
-    LOG(INFO) << "Copying second label slice...";
-    CopyDataToFrom(&train_y_2, train_y, train_y.Size() / 2, 0,
-                   train_y.Size() / 2);
-
-    auto test = data.ReadTestData();
-    nsamples = test.first.shape(0);
-    auto mtest =
-        Reshape(test.first, Shape{nsamples, test.first.Size() / nsamples});
-    SubRow(mean, &mtest);
-    test_x = Reshape(mtest, test.first.shape());
-    test_y = test.second;
-  }
-
-  CHECK_EQ(train_x.shape(0), train_y.shape(0));
-  CHECK_EQ(test_x.shape(0), test_y.shape(0));
-  LOG(INFO) << "Total Training samples = " << train_y.shape(0)
-            << ", Total Test samples = " << test_y.shape(0);
-  CHECK_EQ(train_x_1.shape(0), train_y_1.shape(0));
-  LOG(INFO) << "On net 1, Training samples = " << train_y_1.shape(0)
-            << ", Test samples = " << test_y.shape(0);
-  CHECK_EQ(train_x_2.shape(0), train_y_2.shape(0));
-  LOG(INFO) << "On net 2, Training samples = " << train_y_2.shape(0);
-
-  auto net_1 = CreateNet();
-  auto net_2 = CreateNet();
-
-  SGD sgd;
-  OptimizerConf opt_conf;
-  opt_conf.set_momentum(0.9);
-  auto reg = opt_conf.mutable_regularizer();
-  reg->set_coefficient(0.004);
-  sgd.Setup(opt_conf);
-  sgd.SetLearningRateGenerator([lr](int step) {
-    if (step <= 120)
-      return 0.001;
-    else if (step <= 130)
-      return 0.0001;
-    else
-      return 0.00001;
-  });
-
-  SoftmaxCrossEntropy loss_1, loss_2;
-  Accuracy acc_1, acc_2;
-  /// Create updater aggregating gradient on CPU
-  std::shared_ptr<Updater> updater = std::make_shared<LocalUpdater>(2, &sgd);
-
-  /// Only need to register parameter once.
-  net_1.Compile(true, true, updater, &loss_1, &acc_1);
-  net_2.Compile(true, false, updater, &loss_2, &acc_1);
-
-  MemPoolConf mem_conf;
-  mem_conf.add_device(0);
-  mem_conf.add_device(1);
-  std::shared_ptr<DeviceMemPool> mem_pool(new CnMemPool(mem_conf));
-  std::shared_ptr<CudaGPU> dev_1(new CudaGPU(0, mem_pool));
-  std::shared_ptr<CudaGPU> dev_2(new CudaGPU(1, mem_pool));
-
-  net_1.ToDevice(dev_1);
-  net_2.ToDevice(dev_2);
-
-  train_x_1.ToDevice(dev_1);
-  train_y_1.ToDevice(dev_1);
-  test_x.ToDevice(dev_1);
-  test_y.ToDevice(dev_1);
-  train_x_2.ToDevice(dev_2);
-  train_y_2.ToDevice(dev_2);
-
-  // net.Train(100, num_epoch, train_x, train_y, test_x, test_y);
-
-  LOG(INFO) << "Launching thread...";
-  std::thread t1 =
-      net_1.TrainThread(50, num_epoch, train_x_1, train_y_1, test_x, test_y);
-  std::thread t2 = net_2.TrainThread(50, num_epoch, train_x_2, train_y_2);
-  t1.join();
-  t2.join();
-}
-}
-
-int main(int argc, char **argv) {
-  singa::InitChannel(nullptr);
-  int pos = singa::ArgPos(argc, argv, "-epoch");
-  int nEpoch = 1;
-  if (pos != -1) nEpoch = atoi(argv[pos + 1]);
-  pos = singa::ArgPos(argc, argv, "-lr");
-  float lr = 0.001;
-  if (pos != -1) lr = atof(argv[pos + 1]);
-  pos = singa::ArgPos(argc, argv, "-data");
-  string data = "cifar-10-batches-bin";
-  if (pos != -1) data = argv[pos + 1];
-
-  LOG(INFO) << "Start training";
-  singa::Train(lr, nEpoch, data);
-  LOG(INFO) << "End training";
-}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/alexnet.cc
----------------------------------------------------------------------
diff --git a/examples/cifar10/alexnet.cc b/examples/cifar10/alexnet.cc
deleted file mode 100644
index 61097b6..0000000
--- a/examples/cifar10/alexnet.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "./cifar10.h"
-#include "singa/model/feed_forward_net.h"
-#include "singa/model/optimizer.h"
-#include "singa/model/metric.h"
-#include "singa/utils/channel.h"
-#include "singa/utils/string.h"
-namespace singa {
-// currently supports 'cudnn' and 'singacpp'
-#ifdef USE_CUDNN
-const std::string engine = "cudnn";
-#else
-const std::string engine = "singacpp";
-#endif  // USE_CUDNN
-LayerConf GenConvConf(string name, int nb_filter, int kernel, int stride,
-                      int pad, float std) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_convolution");
-  ConvolutionConf *conv = conf.mutable_convolution_conf();
-  conv->set_num_output(nb_filter);
-  conv->add_kernel_size(kernel);
-  conv->add_stride(stride);
-  conv->add_pad(pad);
-  conv->set_bias_term(true);
-
-  ParamSpec *wspec = conf.add_param();
-  wspec->set_name(name + "_weight");
-  auto wfill = wspec->mutable_filler();
-  wfill->set_type("Gaussian");
-  wfill->set_std(std);
-
-  ParamSpec *bspec = conf.add_param();
-  bspec->set_name(name + "_bias");
-  bspec->set_lr_mult(2);
-//  bspec->set_decay_mult(0);
-  return conf;
-}
-
-LayerConf GenPoolingConf(string name, bool max_pool, int kernel, int stride,
-                         int pad) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_pooling");
-  PoolingConf *pool = conf.mutable_pooling_conf();
-  pool->set_kernel_size(kernel);
-  pool->set_stride(stride);
-  pool->set_pad(pad);
-  if (!max_pool) pool->set_pool(PoolingConf_PoolMethod_AVE);
-  return conf;
-}
-
-LayerConf GenReLUConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_relu");
-  return conf;
-}
-
-LayerConf GenDenseConf(string name, int num_output, float std, float wd) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type("singa_dense");
-  DenseConf *dense = conf.mutable_dense_conf();
-  dense->set_num_output(num_output);
-
-  ParamSpec *wspec = conf.add_param();
-  wspec->set_name(name + "_weight");
-  wspec->set_decay_mult(wd);
-  auto wfill = wspec->mutable_filler();
-  wfill->set_type("Gaussian");
-  wfill->set_std(std);
-
-  ParamSpec *bspec = conf.add_param();
-  bspec->set_name(name + "_bias");
-  bspec->set_lr_mult(2);
-  bspec->set_decay_mult(0);
-
-  return conf;
-}
-
-LayerConf GenLRNConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type(engine + "_lrn");
-  LRNConf *lrn = conf.mutable_lrn_conf();
-  lrn->set_local_size(3);
-  lrn->set_alpha(5e-05);
-  lrn->set_beta(0.75);
-  return conf;
-}
-
-LayerConf GenFlattenConf(string name) {
-  LayerConf conf;
-  conf.set_name(name);
-  conf.set_type("singa_flatten");
-  return conf;
-}
-
-FeedForwardNet CreateNet() {
-  FeedForwardNet net;
-  Shape s{3, 32, 32};
-
-  net.Add(GenConvConf("conv1", 32, 5, 1, 2, 0.0001), &s);
-  net.Add(GenPoolingConf("pool1", true, 3, 2, 1));
-  net.Add(GenReLUConf("relu1"));
-  net.Add(GenLRNConf("lrn1"));
-  net.Add(GenConvConf("conv2", 32, 5, 1, 2, 0.01));
-  net.Add(GenReLUConf("relu2"));
-  net.Add(GenPoolingConf("pool2", false, 3, 2, 1));
-  net.Add(GenLRNConf("lrn2"));
-  net.Add(GenConvConf("conv3", 64, 5, 1, 2, 0.01));
-  net.Add(GenReLUConf("relu3"));
-  net.Add(GenPoolingConf("pool3", false, 3, 2, 1));
-  net.Add(GenFlattenConf("flat"));
-  net.Add(GenDenseConf("ip", 10, 0.01, 250));
-  return net;
-}
-
-void Train(int num_epoch, string data_dir) {
-  Cifar10 data(data_dir);
-  Tensor train_x, train_y, test_x, test_y;
-  {
-    auto train = data.ReadTrainData();
-    size_t nsamples = train.first.shape(0);
-    auto mtrain =
-        Reshape(train.first, Shape{nsamples, train.first.Size() / nsamples});
-    const Tensor& mean = Average(mtrain, 0);
-    SubRow(mean, &mtrain);
-    train_x = Reshape(mtrain, train.first.shape());
-    train_y = train.second;
-    auto test = data.ReadTestData();
-    nsamples = test.first.shape(0);
-    auto mtest =
-        Reshape(test.first, Shape{nsamples, test.first.Size() / nsamples});
-    SubRow(mean, &mtest);
-    test_x = Reshape(mtest, test.first.shape());
-    test_y = test.second;
-  }
-  CHECK_EQ(train_x.shape(0), train_y.shape(0));
-  CHECK_EQ(test_x.shape(0), test_y.shape(0));
-  LOG(INFO) << "Training samples = " << train_y.shape(0)
-            << ", Test samples = " << test_y.shape(0);
-  auto net = CreateNet();
-  SGD sgd;
-  OptimizerConf opt_conf;
-  opt_conf.set_momentum(0.9);
-  auto reg = opt_conf.mutable_regularizer();
-  reg->set_coefficient(0.004);
-  sgd.Setup(opt_conf);
-  sgd.SetLearningRateGenerator([](int step) {
-    if (step <= 120)
-      return 0.001;
-    else if (step <= 130)
-      return 0.0001;
-    else
-      return 0.00001;
-  });
-
-  SoftmaxCrossEntropy loss;
-  Accuracy acc;
-  net.Compile(true, &sgd, &loss, &acc);
-#ifdef USE_CUDNN
-  auto dev = std::make_shared<CudaGPU>();
-  net.ToDevice(dev);
-  train_x.ToDevice(dev);
-  train_y.ToDevice(dev);
-  test_x.ToDevice(dev);
-  test_y.ToDevice(dev);
-#endif  // USE_CUDNN
-  net.Train(100, num_epoch, train_x, train_y, test_x, test_y);
-}
-}
-
-int main(int argc, char **argv) {
-  singa::InitChannel(nullptr);
-  int pos = singa::ArgPos(argc, argv, "-epoch");
-  int nEpoch = 1;
-  if (pos != -1) nEpoch = atoi(argv[pos + 1]);
-  pos = singa::ArgPos(argc, argv, "-data");
-  string data = "cifar-10-batches-bin";
-  if (pos != -1) data = argv[pos + 1];
-
-  LOG(INFO) << "Start training";
-  singa::Train(nEpoch, data);
-  LOG(INFO) << "End training";
-}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/alexnet.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/alexnet.py b/examples/cifar10/alexnet.py
deleted file mode 100644
index b056e70..0000000
--- a/examples/cifar10/alexnet.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-''' This model is created following the structure from
-https://code.google.com/p/cuda-convnet/source/browse/trunk/example-layers/layers-18pct.cfg
-Following the same setting for hyper-parameters and data pre-processing, the final
-validation accuracy would be about 82%.
-'''
-from __future__ import print_function
-from builtins import zip
-
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
-from singa import layer
-from singa import metric
-from singa import loss
-from singa import net as ffnet
-
-
-def create_net(use_cpu=False):
-    if use_cpu:
-        layer.engine = 'singacpp'
-
-    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
-    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
-    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
-    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}
-
-    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
-    net.add(layer.Conv2D('conv1', 32, 5, 1, W_specs=W0_specs.copy(), b_specs=b_specs.copy(), pad=2, input_sample_shape=(3,32,32,)))
-    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
-    net.add(layer.Activation('relu1'))
-    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
-    net.add(layer.Conv2D('conv2', 32, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
-    net.add(layer.Activation('relu2'))
-    net.add(layer.AvgPooling2D('pool2', 3, 2,  pad=1))
-    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
-    net.add(layer.Conv2D('conv3', 64, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
-    net.add(layer.Activation('relu3'))
-    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
-    net.add(layer.Flatten('flat'))
-    net.add(layer.Dense( 'dense', 10, W_specs=W2_specs.copy(), b_specs=b_specs.copy()))
-    for (p, specs) in zip(net.param_values(), net.param_specs()):
-        filler = specs.filler
-        if filler.type == 'gaussian':
-            p.gaussian(filler.mean, filler.std)
-        else:
-            p.set_value(0)
-        print(specs.name, filler.type, p.l1())
-
-    return net

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/cnn-parallel.cc
----------------------------------------------------------------------
diff --git a/examples/cifar10/cnn-parallel.cc b/examples/cifar10/cnn-parallel.cc
new file mode 100644
index 0000000..8cc3352
--- /dev/null
+++ b/examples/cifar10/cnn-parallel.cc
@@ -0,0 +1,265 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#include "cifar10.h"
+#include "singa/model/feed_forward_net.h"
+#include "singa/model/optimizer.h"
+#include "singa/model/updater.h"
+#include "singa/model/initializer.h"
+#include "singa/model/metric.h"
+#include "singa/utils/channel.h"
+#include "singa/utils/string.h"
+#include "singa/core/memory.h"
+#include <thread>
+#include <memory>
+
+namespace singa {
+const std::string engine = "cudnn";
+
+LayerConf GenConvConf(string name, int nb_filter, int kernel, int stride,
+                      int pad, float std) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_convolution");
+  ConvolutionConf *conv = conf.mutable_convolution_conf();
+  conv->set_num_output(nb_filter);
+  conv->add_kernel_size(kernel);
+  conv->add_stride(stride);
+  conv->add_pad(pad);
+  conv->set_bias_term(true);
+
+  ParamSpec *wspec = conf.add_param();
+  wspec->set_name(name + "_weight");
+  auto wfill = wspec->mutable_filler();
+  wfill->set_type("Gaussian");
+  wfill->set_std(std);
+
+  ParamSpec *bspec = conf.add_param();
+  bspec->set_name(name + "_bias");
+  bspec->set_lr_mult(2);
+  //  bspec->set_decay_mult(0);
+  return conf;
+}
+
+LayerConf GenPoolingConf(string name, bool max_pool, int kernel, int stride,
+                         int pad) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_pooling");
+  PoolingConf *pool = conf.mutable_pooling_conf();
+  pool->set_kernel_size(kernel);
+  pool->set_stride(stride);
+  pool->set_pad(pad);
+  if (!max_pool) pool->set_pool(PoolingConf_PoolMethod_AVE);
+  return conf;
+}
+
+LayerConf GenReLUConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_relu");
+  return conf;
+}
+
+LayerConf GenDenseConf(string name, int num_output, float std, float wd) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type("singa_dense");
+  DenseConf *dense = conf.mutable_dense_conf();
+  dense->set_num_output(num_output);
+
+  ParamSpec *wspec = conf.add_param();
+  wspec->set_name(name + "_weight");
+  wspec->set_decay_mult(wd);
+  auto wfill = wspec->mutable_filler();
+  wfill->set_type("Gaussian");
+  wfill->set_std(std);
+
+  ParamSpec *bspec = conf.add_param();
+  bspec->set_name(name + "_bias");
+  bspec->set_lr_mult(2);
+  bspec->set_decay_mult(0);
+
+  return conf;
+}
+
+LayerConf GenLRNConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_lrn");
+  LRNConf *lrn = conf.mutable_lrn_conf();
+  lrn->set_local_size(3);
+  lrn->set_alpha(5e-05);
+  lrn->set_beta(0.75);
+  return conf;
+}
+
+LayerConf GenFlattenConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type("singa_flatten");
+  return conf;
+}
+
+FeedForwardNet CreateNet() {
+  FeedForwardNet net;
+  Shape s{3, 32, 32};
+
+  net.Add(GenConvConf("conv1", 32, 5, 1, 2, 0.0001), &s);
+  net.Add(GenPoolingConf("pool1", true, 3, 2, 1));
+  net.Add(GenReLUConf("relu1"));
+  net.Add(GenLRNConf("lrn1"));
+  net.Add(GenConvConf("conv2", 32, 5, 1, 2, 0.01));
+  net.Add(GenReLUConf("relu2"));
+  net.Add(GenPoolingConf("pool2", false, 3, 2, 1));
+  net.Add(GenLRNConf("lrn2"));
+  net.Add(GenConvConf("conv3", 64, 5, 1, 2, 0.01));
+  net.Add(GenReLUConf("relu3"));
+  net.Add(GenPoolingConf("pool3", false, 3, 2, 1));
+  net.Add(GenFlattenConf("flat"));
+  net.Add(GenDenseConf("ip", 10, 0.01, 250));
+  return net;
+}
+
+void Train(float lr, int num_epoch, string data_dir) {
+  Cifar10 data(data_dir);
+  Tensor train_x, train_y, test_x, test_y;
+  Tensor train_x_1, train_x_2, train_y_1, train_y_2;
+  {
+    auto train = data.ReadTrainData();
+    size_t nsamples = train.first.shape(0);
+    auto mtrain =
+        Reshape(train.first, Shape{nsamples, train.first.Size() / nsamples});
+    const Tensor &mean = Average(mtrain, 0);
+    SubRow(mean, &mtrain);
+    train_x = Reshape(mtrain, train.first.shape());
+    train_y = train.second;
+
+    LOG(INFO) << "Slicing training data...";
+    train_x_1.Reshape(Shape{nsamples / 2, train.first.shape(1),
+        train.first.shape(2), train.first.shape(3)});
+    LOG(INFO) << "Copying first data slice...";
+    CopyDataToFrom(&train_x_1, train_x, train_x.Size() / 2);
+    train_x_2.Reshape(Shape{nsamples / 2, train.first.shape(1),
+        train.first.shape(2), train.first.shape(3)});
+    LOG(INFO) << "Copying second data slice...";
+    CopyDataToFrom(&train_x_2, train_x, train_x.Size() / 2, 0,
+                   train_x.Size() / 2);
+    train_y_1.Reshape(Shape{nsamples / 2});
+    train_y_1.AsType(kInt);
+    LOG(INFO) << "Copying first label slice...";
+    CopyDataToFrom(&train_y_1, train_y, train_y.Size() / 2);
+    train_y_2.Reshape(Shape{nsamples / 2});
+    train_y_2.AsType(kInt);
+    LOG(INFO) << "Copying second label slice...";
+    CopyDataToFrom(&train_y_2, train_y, train_y.Size() / 2, 0,
+                   train_y.Size() / 2);
+
+    auto test = data.ReadTestData();
+    nsamples = test.first.shape(0);
+    auto mtest =
+        Reshape(test.first, Shape{nsamples, test.first.Size() / nsamples});
+    SubRow(mean, &mtest);
+    test_x = Reshape(mtest, test.first.shape());
+    test_y = test.second;
+  }
+
+  CHECK_EQ(train_x.shape(0), train_y.shape(0));
+  CHECK_EQ(test_x.shape(0), test_y.shape(0));
+  LOG(INFO) << "Total Training samples = " << train_y.shape(0)
+            << ", Total Test samples = " << test_y.shape(0);
+  CHECK_EQ(train_x_1.shape(0), train_y_1.shape(0));
+  LOG(INFO) << "On net 1, Training samples = " << train_y_1.shape(0)
+            << ", Test samples = " << test_y.shape(0);
+  CHECK_EQ(train_x_2.shape(0), train_y_2.shape(0));
+  LOG(INFO) << "On net 2, Training samples = " << train_y_2.shape(0);
+
+  auto net_1 = CreateNet();
+  auto net_2 = CreateNet();
+
+  SGD sgd;
+  OptimizerConf opt_conf;
+  opt_conf.set_momentum(0.9);
+  auto reg = opt_conf.mutable_regularizer();
+  reg->set_coefficient(0.004);
+  sgd.Setup(opt_conf);
+  sgd.SetLearningRateGenerator([lr](int step) {
+    if (step <= 120)
+      return 0.001;
+    else if (step <= 130)
+      return 0.0001;
+    else
+      return 0.00001;
+  });
+
+  SoftmaxCrossEntropy loss_1, loss_2;
+  Accuracy acc_1, acc_2;
+  /// Create updater aggregating gradient on CPU
+  std::shared_ptr<Updater> updater = std::make_shared<LocalUpdater>(2, &sgd);
+
+  /// Only need to register parameter once.
+  net_1.Compile(true, true, updater, &loss_1, &acc_1);
+  net_2.Compile(true, false, updater, &loss_2, &acc_1);
+
+  MemPoolConf mem_conf;
+  mem_conf.add_device(0);
+  mem_conf.add_device(1);
+  std::shared_ptr<DeviceMemPool> mem_pool(new CnMemPool(mem_conf));
+  std::shared_ptr<CudaGPU> dev_1(new CudaGPU(0, mem_pool));
+  std::shared_ptr<CudaGPU> dev_2(new CudaGPU(1, mem_pool));
+
+  net_1.ToDevice(dev_1);
+  net_2.ToDevice(dev_2);
+
+  train_x_1.ToDevice(dev_1);
+  train_y_1.ToDevice(dev_1);
+  test_x.ToDevice(dev_1);
+  test_y.ToDevice(dev_1);
+  train_x_2.ToDevice(dev_2);
+  train_y_2.ToDevice(dev_2);
+
+  // net.Train(100, num_epoch, train_x, train_y, test_x, test_y);
+
+  LOG(INFO) << "Launching thread...";
+  std::thread t1 =
+      net_1.TrainThread(50, num_epoch, train_x_1, train_y_1, test_x, test_y);
+  std::thread t2 = net_2.TrainThread(50, num_epoch, train_x_2, train_y_2);
+  t1.join();
+  t2.join();
+}
+}
+
+int main(int argc, char **argv) {
+  singa::InitChannel(nullptr);
+  int pos = singa::ArgPos(argc, argv, "-epoch");
+  int nEpoch = 1;
+  if (pos != -1) nEpoch = atoi(argv[pos + 1]);
+  pos = singa::ArgPos(argc, argv, "-lr");
+  float lr = 0.001;
+  if (pos != -1) lr = atof(argv[pos + 1]);
+  pos = singa::ArgPos(argc, argv, "-data");
+  string data = "cifar-10-batches-bin";
+  if (pos != -1) data = argv[pos + 1];
+
+  LOG(INFO) << "Start training";
+  singa::Train(lr, nEpoch, data);
+  LOG(INFO) << "End training";
+}
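
The SetLearningRateGenerator lambda above encodes a step schedule (note the captured lr value is never actually used; the hard-coded constants are always returned). An equivalent Python sketch, with a hypothetical function name:

    # Step-wise learning-rate schedule mirroring the C++ lambda above.
    def lr_at_step(step):
        if step <= 120:
            return 0.001
        elif step <= 130:
            return 0.0001
        else:
            return 0.00001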

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/cnn.cc
----------------------------------------------------------------------
diff --git a/examples/cifar10/cnn.cc b/examples/cifar10/cnn.cc
new file mode 100644
index 0000000..61097b6
--- /dev/null
+++ b/examples/cifar10/cnn.cc
@@ -0,0 +1,207 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#include "./cifar10.h"
+#include "singa/model/feed_forward_net.h"
+#include "singa/model/optimizer.h"
+#include "singa/model/metric.h"
+#include "singa/utils/channel.h"
+#include "singa/utils/string.h"
+namespace singa {
+// currently supports 'cudnn' and 'singacpp'
+#ifdef USE_CUDNN
+const std::string engine = "cudnn";
+#else
+const std::string engine = "singacpp";
+#endif  // USE_CUDNN
+LayerConf GenConvConf(string name, int nb_filter, int kernel, int stride,
+                      int pad, float std) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_convolution");
+  ConvolutionConf *conv = conf.mutable_convolution_conf();
+  conv->set_num_output(nb_filter);
+  conv->add_kernel_size(kernel);
+  conv->add_stride(stride);
+  conv->add_pad(pad);
+  conv->set_bias_term(true);
+
+  ParamSpec *wspec = conf.add_param();
+  wspec->set_name(name + "_weight");
+  auto wfill = wspec->mutable_filler();
+  wfill->set_type("Gaussian");
+  wfill->set_std(std);
+
+  ParamSpec *bspec = conf.add_param();
+  bspec->set_name(name + "_bias");
+  bspec->set_lr_mult(2);
+//  bspec->set_decay_mult(0);
+  return conf;
+}
+
+LayerConf GenPoolingConf(string name, bool max_pool, int kernel, int stride,
+                         int pad) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_pooling");
+  PoolingConf *pool = conf.mutable_pooling_conf();
+  pool->set_kernel_size(kernel);
+  pool->set_stride(stride);
+  pool->set_pad(pad);
+  if (!max_pool) pool->set_pool(PoolingConf_PoolMethod_AVE);
+  return conf;
+}
+
+LayerConf GenReLUConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_relu");
+  return conf;
+}
+
+LayerConf GenDenseConf(string name, int num_output, float std, float wd) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type("singa_dense");
+  DenseConf *dense = conf.mutable_dense_conf();
+  dense->set_num_output(num_output);
+
+  ParamSpec *wspec = conf.add_param();
+  wspec->set_name(name + "_weight");
+  wspec->set_decay_mult(wd);
+  auto wfill = wspec->mutable_filler();
+  wfill->set_type("Gaussian");
+  wfill->set_std(std);
+
+  ParamSpec *bspec = conf.add_param();
+  bspec->set_name(name + "_bias");
+  bspec->set_lr_mult(2);
+  bspec->set_decay_mult(0);
+
+  return conf;
+}
+
+LayerConf GenLRNConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type(engine + "_lrn");
+  LRNConf *lrn = conf.mutable_lrn_conf();
+  lrn->set_local_size(3);
+  lrn->set_alpha(5e-05);
+  lrn->set_beta(0.75);
+  return conf;
+}
+
+LayerConf GenFlattenConf(string name) {
+  LayerConf conf;
+  conf.set_name(name);
+  conf.set_type("singa_flatten");
+  return conf;
+}
+
+FeedForwardNet CreateNet() {
+  FeedForwardNet net;
+  Shape s{3, 32, 32};
+
+  net.Add(GenConvConf("conv1", 32, 5, 1, 2, 0.0001), &s);
+  net.Add(GenPoolingConf("pool1", true, 3, 2, 1));
+  net.Add(GenReLUConf("relu1"));
+  net.Add(GenLRNConf("lrn1"));
+  net.Add(GenConvConf("conv2", 32, 5, 1, 2, 0.01));
+  net.Add(GenReLUConf("relu2"));
+  net.Add(GenPoolingConf("pool2", false, 3, 2, 1));
+  net.Add(GenLRNConf("lrn2"));
+  net.Add(GenConvConf("conv3", 64, 5, 1, 2, 0.01));
+  net.Add(GenReLUConf("relu3"));
+  net.Add(GenPoolingConf("pool3", false, 3, 2, 1));
+  net.Add(GenFlattenConf("flat"));
+  net.Add(GenDenseConf("ip", 10, 0.01, 250));
+  return net;
+}
+
+void Train(int num_epoch, string data_dir) {
+  Cifar10 data(data_dir);
+  Tensor train_x, train_y, test_x, test_y;
+  {
+    auto train = data.ReadTrainData();
+    size_t nsamples = train.first.shape(0);
+    auto mtrain =
+        Reshape(train.first, Shape{nsamples, train.first.Size() / nsamples});
+    const Tensor& mean = Average(mtrain, 0);
+    SubRow(mean, &mtrain);
+    train_x = Reshape(mtrain, train.first.shape());
+    train_y = train.second;
+    auto test = data.ReadTestData();
+    nsamples = test.first.shape(0);
+    auto mtest =
+        Reshape(test.first, Shape{nsamples, test.first.Size() / nsamples});
+    SubRow(mean, &mtest);
+    test_x = Reshape(mtest, test.first.shape());
+    test_y = test.second;
+  }
+  CHECK_EQ(train_x.shape(0), train_y.shape(0));
+  CHECK_EQ(test_x.shape(0), test_y.shape(0));
+  LOG(INFO) << "Training samples = " << train_y.shape(0)
+            << ", Test samples = " << test_y.shape(0);
+  auto net = CreateNet();
+  SGD sgd;
+  OptimizerConf opt_conf;
+  opt_conf.set_momentum(0.9);
+  auto reg = opt_conf.mutable_regularizer();
+  reg->set_coefficient(0.004);
+  sgd.Setup(opt_conf);
+  sgd.SetLearningRateGenerator([](int step) {
+    if (step <= 120)
+      return 0.001;
+    else if (step <= 130)
+      return 0.0001;
+    else
+      return 0.00001;
+  });
+
+  SoftmaxCrossEntropy loss;
+  Accuracy acc;
+  net.Compile(true, &sgd, &loss, &acc);
+#ifdef USE_CUDNN
+  auto dev = std::make_shared<CudaGPU>();
+  net.ToDevice(dev);
+  train_x.ToDevice(dev);
+  train_y.ToDevice(dev);
+  test_x.ToDevice(dev);
+  test_y.ToDevice(dev);
+#endif  // USE_CUDNN
+  net.Train(100, num_epoch, train_x, train_y, test_x, test_y);
+}
+}
+
+int main(int argc, char **argv) {
+  singa::InitChannel(nullptr);
+  int pos = singa::ArgPos(argc, argv, "-epoch");
+  int nEpoch = 1;
+  if (pos != -1) nEpoch = atoi(argv[pos + 1]);
+  pos = singa::ArgPos(argc, argv, "-data");
+  string data = "cifar-10-batches-bin";
+  if (pos != -1) data = argv[pos + 1];
+
+  LOG(INFO) << "Start training";
+  singa::Train(nEpoch, data);
+  LOG(INFO) << "End training";
+}
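
For reference, the mean-subtraction preprocessing inside Train() corresponds to this numpy sketch (function and variable names are illustrative):

    import numpy as np

    def subtract_train_mean(train_x, test_x):
        # Average(mtrain, 0): per-feature mean over the flattened training set.
        flat_train = train_x.reshape(train_x.shape[0], -1)
        mean = flat_train.mean(axis=0)
        # SubRow: subtract the same training mean from both splits.
        flat_test = test_x.reshape(test_x.shape[0], -1)
        return ((flat_train - mean).reshape(train_x.shape),
                (flat_test - mean).reshape(test_x.shape))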

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/examples/cifar10/cnn.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/cnn.py b/examples/cifar10/cnn.py
new file mode 100644
index 0000000..b056e70
--- /dev/null
+++ b/examples/cifar10/cnn.py
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+''' This model is created following the structure from
+https://code.google.com/p/cuda-convnet/source/browse/trunk/example-layers/layers-18pct.cfg
+Following the same setting for hyper-parameters and data pre-processing, the final
+validation accuracy would be about 82%.
+'''
+from __future__ import print_function
+from builtins import zip
+
+# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
+from singa import layer
+from singa import metric
+from singa import loss
+from singa import net as ffnet
+
+
+def create_net(use_cpu=False):
+    if use_cpu:
+        layer.engine = 'singacpp'
+
+    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
+    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
+    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
+    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}
+
+    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
+    net.add(layer.Conv2D('conv1', 32, 5, 1, W_specs=W0_specs.copy(), b_specs=b_specs.copy(), pad=2, input_sample_shape=(3,32,32,)))
+    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
+    net.add(layer.Activation('relu1'))
+    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
+    net.add(layer.Conv2D('conv2', 32, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
+    net.add(layer.Activation('relu2'))
+    net.add(layer.AvgPooling2D('pool2', 3, 2,  pad=1))
+    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
+    net.add(layer.Conv2D('conv3', 64, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
+    net.add(layer.Activation('relu3'))
+    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
+    net.add(layer.Flatten('flat'))
+    net.add(layer.Dense( 'dense', 10, W_specs=W2_specs.copy(), b_specs=b_specs.copy()))
+    for (p, specs) in zip(net.param_values(), net.param_specs()):
+        filler = specs.filler
+        if filler.type == 'gaussian':
+            p.gaussian(filler.mean, filler.std)
+        else:
+            p.set_value(0)
+        print(specs.name, filler.type, p.l1())
+
+    return net
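
A minimal sketch of driving the renamed module (assumes the SINGA Python package is installed and the working directory is examples/cifar10; data loading and training are handled by the surrounding scripts):

    # Hypothetical driver: build the CIFAR-10 CNN on CPU and list its parameters.
    from cnn import create_net

    net = create_net(use_cpu=True)
    print(net.param_names())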

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7a641bf8/python/singa/net.py
----------------------------------------------------------------------
diff --git a/python/singa/net.py b/python/singa/net.py
index 501b8bc..8f5ba97 100644
--- a/python/singa/net.py
+++ b/python/singa/net.py
@@ -175,6 +175,15 @@ class FeedForwardNet(object):
         return [spec.name for spec in self.param_specs()]
 
     def train(self, x, y):
+        '''Run BP for one iteration.
+
+        Deprecated: kept only for backward compatibility. The name is
+        confusing because this method does not update parameters; back
+        propagation computes gradients but does not train. Use backprob().
+        '''
+        return self.backprob(x, y)
+
+    def backprob(self, x, y):
         '''Run BP for one iteration.
 
         Currently only support nets with a single output layer, and a single
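
A sketch of the renamed API (x and y are placeholder input/label tensors, and net must already be compiled, as in the examples above):

    # Hypothetical usage: both calls run back propagation for one iteration;
    # train() is now a deprecated alias for backprob().
    out = net.backprob(x, y)
    out = net.train(x, y)  # kept for backward compatibility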


