singa-commits mailing list archives

From wan...@apache.org
Subject [06/60] incubator-singa git commit: SINGA-163 - Reorganize the project folder layout
Date Fri, 03 Jun 2016 07:48:11 GMT
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_csv_input_layer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_csv_input_layer.cc b/src/test/test_csv_input_layer.cc
deleted file mode 100644
index 86eaff9..0000000
--- a/src/test/test_csv_input_layer.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-#include <string>
-#include <vector>
-#include <fstream>
-
-#include "gtest/gtest.h"
-#include "singa/neuralnet/input_layer.h"
-#include "singa/proto/job.pb.h"
-
-class CSVInputLayerTest : public ::testing::Test {
- protected:
-  virtual void SetUp() {
-    std::string path ="src/test/test.csv";
-    std::ofstream ofs(path, std::ofstream::out);
-    ASSERT_TRUE(ofs.is_open());
-    ofs << "12,3.2,1,14.1\n";
-    ofs << "2,0.2,0,1.1\n";
-    ofs << "1,2.2,1,4.1\n";
-    ofs.close();
-    auto conf = csv_conf.mutable_store_conf();
-    conf->set_path(path);
-    conf->add_batchsize(2);
-    conf->add_shape(3);
-    conf->set_backend("textfile");
-  }
-  singa::LayerProto csv_conf;
-};
-
-TEST_F(CSVInputLayerTest, Setup) {
-  singa::CSVInputLayer layer;
-  layer.Setup(csv_conf, std::vector<singa::Layer*>{});
-  EXPECT_EQ(2, static_cast<int>(layer.aux_data().size()));
-  EXPECT_EQ(6, layer.data(nullptr).count());
-}
-
-TEST_F(CSVInputLayerTest, ComputeFeature) {
-  singa::CSVInputLayer csv;
-  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
-  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-
-  EXPECT_EQ(12, csv.aux_data()[0]);
-  EXPECT_EQ(2, csv.aux_data()[1]);
-  auto data = csv.data(nullptr);
-  EXPECT_EQ(3.2f, data.cpu_data()[0]);
-  EXPECT_EQ(14.1f, data.cpu_data()[2]);
-  EXPECT_EQ(0.2f, data.cpu_data()[3]);
-  EXPECT_EQ(1.1f, data.cpu_data()[5]);
-}
-TEST_F(CSVInputLayerTest, ComputeFeatureDeploy) {
-  singa::CSVInputLayer csv;
-  csv_conf.mutable_store_conf()->set_shape(0, 4);
-  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
-  csv.ComputeFeature(singa::kDeploy, std::vector<singa::Layer*>{});
-
-  auto data = csv.data(nullptr);
-  EXPECT_EQ(12.f, data.cpu_data()[0]);
-  EXPECT_EQ(1.f, data.cpu_data()[2]);
-  EXPECT_EQ(14.1f, data.cpu_data()[3]);
-  EXPECT_EQ(0.2f, data.cpu_data()[5]);
-}
-
-TEST_F(CSVInputLayerTest, SeekToFirst) {
-  singa::CSVInputLayer csv;
-  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
-  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-
-  auto data = csv.data(nullptr);
-  EXPECT_EQ(2.2f, data.cpu_data()[0]);
-  EXPECT_EQ(4.1f, data.cpu_data()[2]);
-  EXPECT_EQ(3.2f, data.cpu_data()[3]);
-  EXPECT_EQ(14.1f, data.cpu_data()[5]);
-}
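
The removed test pins down CSVInputLayer's parsing contract: each ComputeFeature call consumes batchsize rows, the first field of a row becomes an integer label exposed through aux_data(), the remaining shape fields fill the data blob, and reading wraps back to the first row once the file is exhausted (the SeekToFirst case). Below is a minimal standalone sketch of that row format in plain C++; it is independent of SINGA, and the wrap-around behaviour is inferred from the test expectations rather than from the layer source.

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Parse one CSV row into an integer label plus float features,
// mirroring what the deleted test expects from CSVInputLayer.
bool ParseRow(const std::string& line, int* label, std::vector<float>* feat) {
  std::stringstream ss(line);
  std::string field;
  if (!std::getline(ss, field, ',')) return false;
  *label = std::stoi(field);            // first column = label (aux_data)
  feat->clear();
  while (std::getline(ss, field, ','))
    feat->push_back(std::stof(field));  // remaining columns = data blob
  return true;
}

int main() {
  std::ifstream ifs("src/test/test.csv");
  std::string line;
  while (std::getline(ifs, line)) {
    int label;
    std::vector<float> feat;
    if (ParseRow(line, &label, &feat))
      std::cout << label << " -> " << feat.size() << " features\n";
  }
}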

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_gru_layer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_gru_layer.cc b/src/test/test_gru_layer.cc
deleted file mode 100644
index e0e381f..0000000
--- a/src/test/test_gru_layer.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/************************************************************
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- *************************************************************/
-#include <string>
-#include <vector>
-#include <fstream>
-#include <iostream>
-using namespace std;
-
-
-#include "gtest/gtest.h"
-#include "singa/neuralnet/neuron_layer.h"
-#include "singa/neuralnet/input_layer.h"
-#include "singa/driver.h"
-#include "singa/proto/job.pb.h"
-
-using namespace singa;
-
-class GRULayerTest: public ::testing::Test {
- protected:
-  virtual void SetUp() {
-    // Initialize the settings for the first input-layer
-    std::string path1 = "src/test/gru-in-1.csv";  // path of a csv file
-    std::ofstream ofs1(path1, std::ofstream::out);
-    ASSERT_TRUE(ofs1.is_open());
-    ofs1 << "0,0,0,1\n";
-    ofs1 << "0,0,1,0\n";
-    ofs1.close();
-    auto conf1 = in1_conf.mutable_store_conf();
-    conf1->set_path(path1);
-    conf1->add_batchsize(2);
-    conf1->add_shape(4);
-    conf1->set_backend("textfile");
-    conf1->set_has_label(false);
-
-
-    // Initialize the settings for the second input-layer
-    std::string path2 = "src/test/gru-in-2.csv";  // path of a csv file
-    std::ofstream ofs2(path2, std::ofstream::out);
-    ASSERT_TRUE(ofs2.is_open());
-    ofs2 << "0,1,0,0\n";
-    ofs2 << "1,0,0,0\n";
-    ofs2.close();
-    auto conf2 = in2_conf.mutable_store_conf();
-    conf2->set_path(path2);
-
-    conf2->add_batchsize(2);
-    conf2->add_shape(4);
-    conf2->set_backend("textfile");
-    conf2->set_has_label(false);
-
-
-    gru1_conf.mutable_gru_conf() -> set_dim_hidden(2);
-    gru1_conf.mutable_gru_conf() -> set_bias_term(true);
-    for (int i = 0; i < 9; i ++) {
-      gru1_conf.add_param();
-    }
-
-
-    gru1_conf.mutable_param(0)->set_name("wzhx1");
-    gru1_conf.mutable_param(0)->set_type(kParam);
-    gru1_conf.mutable_param(0)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(0)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(1)->set_name("wrhx1");
-    gru1_conf.mutable_param(1)->set_type(kParam);
-    gru1_conf.mutable_param(1)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(1)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(2)->set_name("wchx1");
-    gru1_conf.mutable_param(2)->set_type(kParam);
-    gru1_conf.mutable_param(2)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(2)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(3)->set_name("wzhh1");
-    gru1_conf.mutable_param(3)->set_type(kParam);
-    gru1_conf.mutable_param(3)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(3)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(4)->set_name("wrhh1");
-    gru1_conf.mutable_param(4)->set_type(kParam);
-    gru1_conf.mutable_param(4)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(4)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(5)->set_name("wchh1");
-    gru1_conf.mutable_param(5)->set_type(kParam);
-    gru1_conf.mutable_param(5)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(5)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(6)->set_name("bz1");
-    gru1_conf.mutable_param(6)->set_type(kParam);
-    gru1_conf.mutable_param(6)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(6)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(7)->set_name("br1");
-    gru1_conf.mutable_param(7)->set_type(kParam);
-    gru1_conf.mutable_param(7)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(7)->mutable_init()->set_value(0.5f);
-
-    gru1_conf.mutable_param(8)->set_name("bc1");
-    gru1_conf.mutable_param(8)->set_type(kParam);
-    gru1_conf.mutable_param(8)->mutable_init()->set_type(kConstant);
-    gru1_conf.mutable_param(8)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_gru_conf() -> set_dim_hidden(2);
-    gru2_conf.mutable_gru_conf() -> set_bias_term(true);
-    for (int i = 0; i < 9; i ++) {
-      gru2_conf.add_param();
-    }
-
-    gru2_conf.mutable_param(0)->set_name("wzhx2");
-    gru2_conf.mutable_param(0)->set_type(kParam);
-    gru2_conf.mutable_param(0)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(0)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(1)->set_name("wrhx2");
-    gru2_conf.mutable_param(1)->set_type(kParam);
-    gru2_conf.mutable_param(1)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(1)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(2)->set_name("wchx2");
-    gru2_conf.mutable_param(2)->set_type(kParam);
-    gru2_conf.mutable_param(2)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(2)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(3)->set_name("wzhh2");
-    gru2_conf.mutable_param(3)->set_type(kParam);
-    gru2_conf.mutable_param(3)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(3)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(4)->set_name("wrhh2");
-    gru2_conf.mutable_param(4)->set_type(kParam);
-    gru2_conf.mutable_param(4)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(4)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(5)->set_name("wchh2");
-    gru2_conf.mutable_param(5)->set_type(kParam);
-    gru2_conf.mutable_param(5)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(5)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(6)->set_name("bz2");
-    gru2_conf.mutable_param(6)->set_type(kParam);
-    gru2_conf.mutable_param(6)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(6)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(7)->set_name("br2");
-    gru2_conf.mutable_param(7)->set_type(kParam);
-    gru2_conf.mutable_param(7)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(7)->mutable_init()->set_value(0.5f);
-
-    gru2_conf.mutable_param(8)->set_name("bc2");
-    gru2_conf.mutable_param(8)->set_type(kParam);
-    gru2_conf.mutable_param(8)->mutable_init()->set_type(kConstant);
-    gru2_conf.mutable_param(8)->mutable_init()->set_value(0.5f);
-  }
-  singa::LayerProto in1_conf;
-  singa::LayerProto in2_conf;
-  singa::LayerProto gru1_conf;
-  singa::LayerProto gru2_conf;
-};
-
-TEST_F(GRULayerTest, Setup) {
-  singa::Driver driver;
-  // driver.RegisterLayer<GRULayer, int> (kGRU);
-  driver.RegisterParam<Param>(0);
-  driver.RegisterParamGenerator<UniformGen>(kUniform);
-  driver.RegisterParamGenerator<ParamGenerator>(kConstant);
-
-  singa::CSVInputLayer in_layer_1;
-  singa::CSVInputLayer in_layer_2;
-
-  in_layer_1.Setup(in1_conf, std::vector<singa::Layer*> { });
-  EXPECT_EQ(2, static_cast<int>(in_layer_1.aux_data().size()));
-  EXPECT_EQ(8, in_layer_1.data(nullptr).count());
-
-  in_layer_2.Setup(in2_conf, std::vector<singa::Layer*>{ });
-  EXPECT_EQ(2, static_cast<int>(in_layer_2.aux_data().size()));
-  EXPECT_EQ(8, in_layer_2.data(nullptr).count());
-
-  singa::GRULayer gru_layer_1;
-  gru_layer_1.Setup(gru1_conf, std::vector<singa::Layer*>{&in_layer_1});
-  // EXPECT_EQ(2, gru_layer_1.hdim());
-  // EXPECT_EQ(4, gru_layer_1.vdim());
-
-  for (unsigned int i = 0; i < gru_layer_1.GetParams().size(); i ++) {
-    gru_layer_1.GetParams()[i]->InitValues();
-  }
-  EXPECT_EQ (0.5, gru_layer_1.GetParams()[0]->data().cpu_data()[0]);
-  // cout << "gru_layer_1: " << gru_layer_1.GetParams()[0]->data().cpu_data()[0]
-  // << endl;
-
-  singa::GRULayer gru_layer_2;
-  gru_layer_2.Setup(gru2_conf,
-                    std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-  // EXPECT_EQ(2, gru_layer_2.hdim());
-  // EXPECT_EQ(4, gru_layer_2.vdim());
-  for (unsigned int i = 0; i < gru_layer_2.GetParams().size(); i ++) {
-    gru_layer_2.GetParams()[i]->InitValues();
-  }
-  EXPECT_EQ (0.5, gru_layer_2.GetParams()[0]->data().cpu_data()[0]);
-}
-
-
-/*
-TEST_F(GRULayerTest, ComputeFeature) {
-  singa::CSVInputLayer in_layer_1;
-  singa::CSVInputLayer in_layer_2;
-
-  in_layer_1.Setup(in1_conf, std::vector<singa::Layer*> { });
-  in_layer_1.ComputeFeature(singa::kTrain, std::vector<singa::Layer*> { });
-  in_layer_2.Setup(in2_conf, std::vector<singa::Layer*>{ });
-  in_layer_2.ComputeFeature(singa::kTrain, std::vector<singa::Layer*> { });
-
-
-  singa::GRULayer gru_layer_1;
-  gru_layer_1.Setup(gru1_conf, std::vector<singa::Layer*>{&in_layer_1});
-  for (unsigned int i = 0; i < gru_layer_1.GetParams().size(); i ++) {
-    gru_layer_1.GetParams()[i]->InitValues();
-  }
-  gru_layer_1.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{&in_layer_1});
-  for (int i = 0; i < gru_layer_1.data(nullptr).count(); i ++) {
-    EXPECT_GT(0.000001,abs(0.204824-gru_layer_1.data(nullptr).cpu_data()[i]));
-  }
-
-  singa::GRULayer gru_layer_2;
-  gru_layer_2.Setup(gru2_conf, std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-  for (unsigned int i = 0; i < gru_layer_2.GetParams().size(); i ++) {
-    gru_layer_2.GetParams()[i]->InitValues();
-  }
-  gru_layer_2.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-  for (int i = 0; i < gru_layer_2.data(nullptr).count(); i ++) {
-    EXPECT_GT(0.000001,abs(0.346753-gru_layer_2.data(nullptr).cpu_data()[i]));
-  }
-}
-
-TEST_F(GRULayerTest, ComputeGradient) {
-  singa::CSVInputLayer in_layer_1;
-  singa::CSVInputLayer in_layer_2;
-
-  in_layer_1.Setup(in1_conf, std::vector<singa::Layer*> { });
-  in_layer_1.ComputeFeature(singa::kTrain, std::vector<singa::Layer*> { });
-  in_layer_2.Setup(in2_conf, std::vector<singa::Layer*>{ });
-  in_layer_2.ComputeFeature(singa::kTrain, std::vector<singa::Layer*> { });
-
-
-  singa::GRULayer gru_layer_1;
-  gru_layer_1.Setup(gru1_conf, std::vector<singa::Layer*>{&in_layer_1});
-  for (unsigned int i = 0; i < gru_layer_1.GetParams().size(); i ++) {
-    gru_layer_1.GetParams()[i]->InitValues();
-  }
-  gru_layer_1.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{&in_layer_1});
-
-
-  singa::GRULayer gru_layer_2;
-  gru_layer_2.Setup(gru2_conf, std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-  for (unsigned int i = 0; i < gru_layer_2.GetParams().size(); i ++) {
-    gru_layer_2.GetParams()[i]->InitValues();
-  }
-  gru_layer_2.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-
-  // For test purpose, we set dummy values for gru_layer_2.grad_
-  for (int i = 0; i < gru_layer_2.grad(nullptr).count(); i ++) {
-    gru_layer_2.mutable_grad(nullptr)->mutable_cpu_data()[i] = 1.0f;
-  }
-  gru_layer_2.ComputeGradient(singa::kTrain, std::vector<singa::Layer*>{&in_layer_2, &gru_layer_1});
-
-  gru_layer_1.ComputeGradient(singa::kTrain, std::vector<singa::Layer*>{&in_layer_1});
-
-}
-*/
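
The constants 0.204824 and 0.346753 in the commented-out ComputeFeature test follow from the GRU equations once every weight and bias is fixed at 0.5: a one-hot input makes each W*x term contribute exactly 0.5, and the numbers match the gating convention h_t = z*h_prev + (1 - z)*c (this convention is inferred from the expected values; the layer's own kernel may arrange the gates differently). A scalar sketch that reproduces both constants:

#include <cmath>
#include <cstdio>

float sigmoid(float v) { return 1.0f / (1.0f + std::exp(-v)); }

// One GRU step per hidden unit. With all weights/biases = 0.5 and a
// one-hot input, W*x contributes 0.5 and U*h contributes 0.5*hdim*h_prev.
// Convention assumed here: h_t = z*h_prev + (1 - z)*c.
float GruStep(float h_prev, int hdim) {
  float wx = 0.5f, b = 0.5f;
  float uh = 0.5f * hdim * h_prev;       // every U entry is 0.5
  float z = sigmoid(wx + uh + b);        // update gate
  float r = sigmoid(wx + uh + b);        // reset gate (same value here)
  float c = std::tanh(wx + r * uh + b);  // candidate state
  return z * h_prev + (1.0f - z) * c;
}

int main() {
  float h1 = GruStep(0.0f, 2);  // gru_layer_1, zero initial state
  float h2 = GruStep(h1, 2);    // gru_layer_2, fed by gru_layer_1
  std::printf("%f %f\n", h1, h2);
}

It prints 0.204824 0.346753, the expected outputs of gru_layer_1 and the stacked gru_layer_2.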

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_kvfile.cc
----------------------------------------------------------------------
diff --git a/src/test/test_kvfile.cc b/src/test/test_kvfile.cc
deleted file mode 100644
index 5707ca9..0000000
--- a/src/test/test_kvfile.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include <sys/stat.h>
-
-#include "gtest/gtest.h"
-#include "singa/io/kvfile.h"
-
-std::string key[] = {"firstkey",
-                     "secondkey",
-                     "3key",
-                     "key4",
-                     "key5"};
-std::string tuple[] = {"firsttuple",
-                       "2th-tuple",
-                       "thirdtuple",
-                       "tuple4",
-                       "tuple5"};
-namespace singa {
-namespace io {
-TEST(KVFileTest, CreateKVFile) {
-  std::string path = "src/test/kvfile.bin";
-  KVFile kvfile(path, KVFile::kCreate, 50);
-  kvfile.Insert(key[0], tuple[0]);
-  kvfile.Insert(key[1], tuple[1]);
-  kvfile.Insert(key[2], tuple[2]);
-  kvfile.Flush();
-}
-
-TEST(KVFileTest, AppendKVFile) {
-  std::string path = "src/test/kvfile.bin";
-  KVFile kvfile(path, KVFile::kAppend, 50);
-  kvfile.Insert(key[3], tuple[3]);
-  kvfile.Insert(key[4], tuple[4]);
-  kvfile.Flush();
-}
-
-TEST(KVFileTest, CountKVFile) {
-  std::string path = "src/test/kvfile.bin";
-  KVFile kvfile(path, KVFile::kRead, 50);
-  int count = kvfile.Count();
-  ASSERT_EQ(5, count);
-}
-
-TEST(KVFileTest, ReadKVFile) {
-  std::string path = "src/test/kvfile.bin";
-  KVFile kvfile(path, KVFile::kRead, 50);
-  std::string k, t;
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_STREQ(key[0].c_str(), k.c_str());
-  ASSERT_STREQ(tuple[0].c_str(), t.c_str());
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_STREQ(key[1].c_str(), k.c_str());
-  ASSERT_STREQ(tuple[1].c_str(), t.c_str());
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_STREQ(key[4].c_str(), k.c_str());
-  ASSERT_STREQ(tuple[4].c_str(), t.c_str());
-  ASSERT_FALSE(kvfile.Next(&k, &t));
-  kvfile.SeekToFirst();
-  ASSERT_TRUE(kvfile.Next(&k, &t));
-  ASSERT_STREQ(key[0].c_str(), k.c_str());
-  ASSERT_STREQ(tuple[0].c_str(), t.c_str());
-}
-}  // namespace io
-}  // namespace singa
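
The KVFile cases above read as an API walkthrough: kCreate starts a fresh file, kAppend extends it, and kRead iterates records in insertion order via Next(), rewinding with SeekToFirst(). A sketch of the underlying technique, an append-only log of length-prefixed key/value records, in plain iostreams (the 4-byte framing here is an illustrative assumption, not KVFile's actual on-disk layout):

#include <cstdint>
#include <cstdio>
#include <fstream>
#include <string>

// Append one length-prefixed key/value record to an open stream.
void WriteRecord(std::ofstream& ofs, const std::string& k, const std::string& v) {
  uint32_t klen = k.size(), vlen = v.size();
  ofs.write(reinterpret_cast<const char*>(&klen), sizeof(klen));
  ofs.write(k.data(), klen);
  ofs.write(reinterpret_cast<const char*>(&vlen), sizeof(vlen));
  ofs.write(v.data(), vlen);
}

// Read the next record; returns false at end of file (cf. KVFile::Next).
bool ReadRecord(std::ifstream& ifs, std::string* k, std::string* v) {
  uint32_t klen, vlen;
  if (!ifs.read(reinterpret_cast<char*>(&klen), sizeof(klen))) return false;
  k->resize(klen);
  ifs.read(&(*k)[0], klen);
  ifs.read(reinterpret_cast<char*>(&vlen), sizeof(vlen));
  v->resize(vlen);
  ifs.read(&(*v)[0], vlen);
  return static_cast<bool>(ifs);
}

int main() {
  {
    std::ofstream ofs("kvfile.bin", std::ios::binary);  // cf. kCreate
    WriteRecord(ofs, "firstkey", "firsttuple");
  }
  std::ifstream ifs("kvfile.bin", std::ios::binary);    // cf. kRead
  std::string k, v;
  while (ReadRecord(ifs, &k, &v))
    std::printf("%s -> %s\n", k.c_str(), v.c_str());
}

Under this framing, Count() amounts to one full scan and SeekToFirst() to ifs.clear() followed by ifs.seekg(0).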

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_math.cc
----------------------------------------------------------------------
diff --git a/src/test/test_math.cc b/src/test/test_math.cc
deleted file mode 100644
index 9830703..0000000
--- a/src/test/test_math.cc
+++ /dev/null
@@ -1,1033 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-#include <thread>
-#include "gtest/gtest.h"
-#include "singa/utils/blob.h"
-#include "singa/utils/math_blob.h"
-#include "singa/utils/math_addr.h"
-#include "singa/utils/math_kernel.h"
-#include "singa/utils/singa_op.h"
-#include "singa/utils/context.h"
-#include "singa/utils/singleton.h"
-
-#ifdef USE_GPU
-#include <cuda_runtime.h>
-#include <cublas_v2.h>
-#endif
-
-using namespace singa;
-using namespace std;
-
-TEST(MathBlobTest, TestScale) {
-  Blob<float> *A = new Blob<float>(10);
-  Blob<float> *B = new Blob<float>(10);
-  A->SetValue(2);
-  B->SetValue(6);
-  Scale<float>(3.0, A);
-  ASSERT_EQ(A->check_equal(B), true);
-}
-
-TEST(MathBlobTest, TestAXPY) {
-  Blob<float> * A = new Blob<float>(10);
-  Blob<float> * B = new Blob<float>(10);
-  Blob<float> * C = new Blob<float>(10);
-  Blob<float> * D = new Blob<float>(10);
-  A->SetValue(2);
-  B->SetValue(3);
-  C->SetValue(7);
-  D->SetValue(2);
-  AXPY<float>(2.0, *A, B);
-  ASSERT_EQ(B->check_equal(C), true);
-  ASSERT_EQ(A->check_equal(D), true);
-}
-
-TEST(MathBlobTest, TestGEMV) {
-  float A[5][5] = {};
-  float AT[5][5] = {};
-  float B[5] = {};
-  float Res[5] = {};
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      A[i][j] = i * j + i - j;
-      AT[j][i] = i * j + i - j;
-    }
-    B[i] = 5*i + 3;
-    Res[i] = i;
-  }
-
-  Blob<float> * BlobA = new Blob<float>(5, 5);
-  Blob<float> * BlobAT = new Blob<float>(5, 5);
-  Blob<float> * BlobB = new Blob<float>(5);
-  Blob<float> * BlobAB = new Blob<float>(5);
-  Blob<float> * BlobATB = new Blob<float>(5);
-  Blob<float> * BlobRes = new Blob<float>(5);
-
-  BlobA->set_cpu_data(A[0]);
-  BlobAT->set_cpu_data(AT[0]);
-  BlobAT->set_transpose(true);
-  BlobB->set_cpu_data(B);
-  BlobAB->set_cpu_data(Res);
-  BlobATB->set_cpu_data(Res);
-
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      Res[i] += 2*A[i][j] * B[j];
-    }
-  }
-
-  BlobRes->set_cpu_data(Res);
-
-  GEMV<float>(2, 1, *BlobA, *BlobB, BlobAB);
-  GEMV<float>(2, 1, *BlobAT, *BlobB, BlobATB);
-
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATB->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestMVDot) {
-  float A[5][5] = {};
-  float AT[5][5] = {};
-  float B[5] = {};
-  float Res[5] = {};
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      A[i][j] = i * j + i - j;
-      AT[j][i] = i * j + i - j;
-    }
-    B[i] = 5*i -2;
-    Res[i] = 0;
-  }
-
-  Blob<float> * BlobA = new Blob<float>(5, 5);
-  Blob<float> * BlobAT = new Blob<float>(5, 5);
-  Blob<float> * BlobB = new Blob<float>(5);
-  Blob<float> * BlobAB = new Blob<float>(5);
-  Blob<float> * BlobATB = new Blob<float>(5);
-  Blob<float> * BlobRes = new Blob<float>(5);
-
-  BlobA->set_cpu_data(A[0]);
-  BlobAT->set_cpu_data(AT[0]);
-  BlobAT->set_transpose(true);
-  BlobB->set_cpu_data(B);
-  BlobAB->set_cpu_data(Res);
-  BlobATB->set_cpu_data(Res);
-
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      Res[i] += A[i][j] * B[j];
-    }
-  }
-
-  BlobRes->set_cpu_data(Res);
-
-  MVDot<float>(*BlobA, *BlobB, BlobAB);
-  MVDot<float>(*BlobAT, *BlobB, BlobATB);
-
-  const float * addrRes = BlobAB->cpu_data();
-  for (int i = 0; i < 5; i++) {
-    ASSERT_EQ(addrRes[i], Res[i]);
-  }
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATB->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestGEMM) {
-  float A[5][5] = {};
-  float AT[5][5] = {};
-  float B[5][5]= {};
-  float BT[5][5]= {};
-  float Res[5][5]= {};
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      A[i][j] = i * j + i - j;
-      AT[j][i] = i * j + i - j;
-      B[i][j] = - i * j + i * i - j * j;
-      BT[j][i] = - i * j + i * i - j * j;
-      Res[i][j] = i * j + i * i + j * j;
-    }
-  }
-
-  Blob<float> * BlobA = new Blob<float>(5, 5);
-  BlobA->set_cpu_data(A[0]);
-  Blob<float> * BlobAT = new Blob<float>(5, 5);
-  BlobAT->set_cpu_data(AT[0]);
-  BlobAT->set_transpose(true);
-  Blob<float> * BlobB = new Blob<float>(5, 5);
-  BlobB->set_cpu_data(B[0]);
-  Blob<float> * BlobBT = new Blob<float>(5, 5);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-  Blob<float> * BlobAB = new Blob<float>(5, 5);
-  BlobAB->set_cpu_data(Res[0]);
-  Blob<float> * BlobABT = new Blob<float>(5, 5);
-  BlobABT->set_cpu_data(Res[0]);
-  Blob<float> * BlobATB = new Blob<float>(5, 5);
-  BlobATB->set_cpu_data(Res[0]);
-  Blob<float> * BlobATBT = new Blob<float>(5, 5);
-  BlobATBT->set_cpu_data(Res[0]);
-
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      Res[i][j] *= 2;
-      for (int k = 0; k < 5; k++) {
-        Res[i][j] += 3 * A[i][k]*B[k][j];
-      }
-    }
-  }
-
-  Blob<float> * BlobRes = new Blob<float>(5, 5);
-  BlobRes->set_cpu_data(Res[0]);
-
-  GEMM<float>(3, 2, *BlobA, *BlobB, BlobAB);
-  GEMM<float>(3, 2, *BlobA, *BlobBT, BlobABT);
-  GEMM<float>(3, 2, *BlobAT, *BlobB, BlobATB);
-  GEMM<float>(3, 2, *BlobAT, *BlobBT, BlobATBT);
-
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobABT->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATBT->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestMMDot) {
-  float A[5][5] = {};
-  float AT[5][5] = {};
-  float B[5][5]= {};
-  float BT[5][5]= {};
-  float Res[5][5]= {};
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      A[i][j] = i * j + i - j;
-      AT[j][i] = i * j + i - j;
-      B[i][j] = - i * j + i * i - j * j;
-      BT[j][i] = - i * j + i * i - j * j;
-      Res[i][j] = i * j + i * i + j * j;
-    }
-  }
-
-  Blob<float> * BlobA = new Blob<float>(5, 5);
-  BlobA->set_cpu_data(A[0]);
-  Blob<float> * BlobAT = new Blob<float>(5, 5);
-  BlobAT->set_cpu_data(AT[0]);
-  BlobAT->set_transpose(true);
-  Blob<float> * BlobB = new Blob<float>(5, 5);
-  BlobB->set_cpu_data(B[0]);
-  Blob<float> * BlobBT = new Blob<float>(5, 5);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-  Blob<float> * BlobAB = new Blob<float>(5, 5);
-  BlobAB->set_cpu_data(Res[0]);
-  Blob<float> * BlobABT = new Blob<float>(5, 5);
-  BlobABT->set_cpu_data(Res[0]);
-  Blob<float> * BlobATB = new Blob<float>(5, 5);
-  BlobATB->set_cpu_data(Res[0]);
-  Blob<float> * BlobATBT = new Blob<float>(5, 5);
-  BlobATBT->set_cpu_data(Res[0]);
-
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 5; j++) {
-      Res[i][j] = 0;
-      for (int k = 0; k < 5; k++) {
-        Res[i][j] += A[i][k]*B[k][j];
-      }
-    }
-  }
-
-  Blob<float> * BlobRes = new Blob<float>(5, 5);
-  BlobRes->set_cpu_data(Res[0]);
-
-  MMDot<float>(*BlobA, *BlobB, BlobAB);
-  MMDot<float>(*BlobA, *BlobBT, BlobABT);
-  MMDot<float>(*BlobAT, *BlobB, BlobATB);
-  MMDot<float>(*BlobAT, *BlobBT, BlobATBT);
-
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobABT->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobATBT->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestVVDot) {
-  float A[10] = {};
-  float B[10] = {};
-  float prod = 0;
-  for (int i = 0; i < 10; i++) {
-    A[i] = i * i - 5* (i%2);
-    B[i] = 2* i * i - 3* (i%4);
-    prod += A[i] * B[i];
-  }
-
-  Blob<float> * BlobA = new Blob<float>(10);
-  BlobA->set_cpu_data(A);
-  Blob<float> * BlobB = new Blob<float>(10);
-  BlobB->set_cpu_data(B);
-  float blobprod = VVDot<float>(*BlobA, *BlobB);
-  ASSERT_EQ(blobprod, prod);
-}
-
-TEST(MathBlobTest, TestOuterProduct) {
-  float A[10] = {};
-  float B[10] = {};
-  float AB[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = i * i - 5* (i%2);
-    B[i] = 2* i * i - 3* (i%4);
-  }
-  for (int i = 0; i < 10; i++) {
-    for (int j = 0; j < 10; j++) {
-      AB[i][j] = A[i]*B[j];
-    }
-  }
-  Blob<float> * BlobA = new Blob<float>(10);
-  BlobA->set_cpu_data(A);
-  Blob<float> * BlobB = new Blob<float>(10);
-  BlobB->set_cpu_data(B);
-  Blob<float> * BlobAB = new Blob<float>(10, 10);
-  // BlobAB->SetValue(3);
-  Blob<float> * BlobRes = new Blob<float>(10, 10);
-  BlobRes->set_cpu_data(AB[0]);
-  OuterProduct<float>(*BlobA, *BlobB, BlobAB);
-
-  ASSERT_EQ(BlobAB->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestMapAB) {
-  float A[10] = {};
-  float Res[10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = i * i - 5* (i%2);
-    Res[i] = A[i] * A[i];
-  }
-  Blob<float> * BlobA = new Blob<float>(10);
-  BlobA->set_cpu_data(A);
-  Blob<float> * BlobB = new Blob<float>(10);
-  Blob<float> * BlobRes = new Blob<float>(10);
-  BlobRes->set_cpu_data(Res);
-  Map<singa::op::Square<float>, float>(*BlobA, BlobB);
-  ASSERT_EQ(BlobB->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestMapABC) {
-  float A[10] = {};
-  float B[10] = {};
-  float Res[10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = i * i - 5* (i%2);
-    B[i] = 2* i * i - 3* (i%4);
-    Res[i] = A[i] * B[i];
-  }
-  Blob<float> * BlobA = new Blob<float>(10);
-  BlobA->set_cpu_data(A);
-  Blob<float> * BlobB = new Blob<float>(10);
-  BlobB->set_cpu_data(B);
-  Blob<float> * BlobC = new Blob<float>(10);
-  Blob<float> * BlobRes = new Blob<float>(10);
-  BlobRes->set_cpu_data(Res);
-  Map<singa::op::Mult<float>, float>(*BlobA, *BlobB, BlobC);
-  ASSERT_EQ(BlobC->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestCopy) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10);
-  float A[10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = i * i - 5* (i%2);
-  }
-  BlobA->set_cpu_data(A);
-  Copy<float>(*BlobA, BlobB);
-  ASSERT_EQ(BlobA->check_equal(BlobB), true);
-}
-
-TEST(MathBlobTest, TestAdd) {
-  Blob<float> *A = new Blob<float>(10);
-  Blob<float> *B = new Blob<float>(10);
-  Blob<float> *C = new Blob<float>(10);
-  Blob<float> *D = new Blob<float>(10);
-  A->SetValue(5);
-  B->SetValue(6);
-  D->SetValue(11);
-  Add<float>(*A, *B, C);
-  ASSERT_EQ(C->check_equal(D), true);
-}
-
-TEST(MathBlobTest, TestSub) {
-  Blob<float> *A = new Blob<float>(10);
-  Blob<float> *B = new Blob<float>(10);
-  Blob<float> *C = new Blob<float>(10);
-  Blob<float> *D = new Blob<float>(10);
-  A->SetValue(5);
-  B->SetValue(6);
-  D->SetValue(-1);
-  Sub<float>(*A, *B, C);
-  ASSERT_EQ(C->check_equal(D), true);
-}
-
-TEST(MathBlobTest, TestMVAddCol) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10, 10);
-  Blob<float> *BlobResT = new Blob<float>(10, 10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[i][j] = i * j + i - j;
-      BT[j][i] = i * j + i - j;
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobB->set_cpu_data(B[0]);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-
-  for (int i = 0; i < 10; i++) {
-    for (int j = 0; j < 10; j++) {
-      B[i][j] = 2.0 * A[i] + 3.0 * B[i][j];
-      BT[j][i] = 2.0 * A[i] + 3.0 * BT[j][i];
-    }
-  }
-
-  BlobRes->set_cpu_data(B[0]);
-  BlobResT->set_cpu_data(BT[0]);
-  BlobResT->set_transpose(true);
-
-  MVAddCol<float>(2.0, 3.0, *BlobA, BlobB);
-  MVAddCol<float>(2.0, 3.0, *BlobA, BlobBT);
-
-  ASSERT_EQ(BlobB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobBT->check_equal(BlobResT), true);
-}
-
-TEST(MathBlobTest, TestMVAddRow) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10, 10);
-  Blob<float> *BlobResT = new Blob<float>(10, 10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[i][j] = i * j + i - j;
-      BT[j][i] = i * j + i - j;
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobB->set_cpu_data(B[0]);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-
-  for (int i = 0; i < 10; i++) {
-    for (int j = 0; j < 10; j++) {
-      B[j][i] = 2.0 * A[i] + 3.0 * B[j][i];
-      BT[i][j] = 2.0 * A[i] + 3.0 * BT[i][j];
-    }
-  }
-
-  BlobRes->set_cpu_data(B[0]);
-  BlobResT->set_cpu_data(BT[0]);
-  BlobResT->set_transpose(true);
-
-  MVAddRow<float>(2.0, 3.0, *BlobA, BlobB);
-  MVAddRow<float>(2.0, 3.0, *BlobA, BlobBT);
-
-  ASSERT_EQ(BlobB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobBT->check_equal(BlobResT), true);
-}
-
-TEST(MathBlobTest, TestRepmatCol) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10, 10);
-  Blob<float> *BlobResT = new Blob<float>(10, 10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[i][j] = A[i];
-      BT[j][i] = A[i];
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobBT->set_transpose(true);
-
-  BlobRes->set_cpu_data(B[0]);
-  BlobResT->set_cpu_data(BT[0]);
-  BlobResT->set_transpose(true);
-
-  RepmatCol<float>(*BlobA, BlobB);
-  RepmatCol<float>(*BlobA, BlobBT);
-
-  ASSERT_EQ(BlobB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobBT->check_equal(BlobResT), true);
-}
-
-TEST(MathBlobTest, TestRepmatRow) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10, 10);
-  Blob<float> *BlobResT = new Blob<float>(10, 10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[j][i] = A[i];
-      BT[i][j] = A[i];
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobBT->set_transpose(true);
-
-  BlobRes->set_cpu_data(B[0]);
-  BlobResT->set_cpu_data(BT[0]);
-  BlobResT->set_transpose(true);
-
-  RepmatRow<float>(*BlobA, BlobB);
-  RepmatRow<float>(*BlobA, BlobBT);
-
-  ASSERT_EQ(BlobB->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobBT->check_equal(BlobResT), true);
-}
-
-TEST(MathBlobTest, TestMVSumCol) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobACopy = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[i][j] = i * j + i - j;
-      BT[j][i] = i * j + i - j;
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobACopy->set_cpu_data(A);
-  BlobB->set_cpu_data(B[0]);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-
-  for (int i = 0; i < 10; i++) {
-    A[i] *= 2.0;
-    for (int j = 0; j < 10; j++) {
-      A[i] += 3.0 * B[i][j];
-    }
-  }
-  BlobRes->set_cpu_data(A);
-
-  MVSumCol<float>(2.0, 3.0, *BlobB, BlobA);
-  MVSumCol<float>(2.0, 3.0, *BlobBT, BlobACopy);
-
-  ASSERT_EQ(BlobA->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobACopy->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestMVSumRow) {
-  Blob<float> *BlobA = new Blob<float>(10);
-  Blob<float> *BlobACopy = new Blob<float>(10);
-  Blob<float> *BlobB = new Blob<float>(10, 10);
-  Blob<float> *BlobBT = new Blob<float>(10, 10);
-  Blob<float> *BlobRes = new Blob<float>(10);
-
-  float A[10] = {};
-  float B[10][10] = {};
-  float BT[10][10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = 5*i -2;
-    for (int j = 0; j < 10; j++) {
-      B[j][i] = i * j + i - j;
-      BT[i][j] = i * j + i - j;
-    }
-  }
-
-  BlobA->set_cpu_data(A);
-  BlobACopy->set_cpu_data(A);
-  BlobB->set_cpu_data(B[0]);
-  BlobBT->set_cpu_data(BT[0]);
-  BlobBT->set_transpose(true);
-
-  for (int i = 0; i < 10; i++) {
-    A[i] *= 2.0;
-    for (int j = 0; j < 10; j++) {
-      A[i] += 3.0 * B[j][i];
-    }
-  }
-  BlobRes->set_cpu_data(A);
-
-  MVSumRow<float>(2.0, 3.0, *BlobB, BlobA);
-  MVSumRow<float>(2.0, 3.0, *BlobBT, BlobACopy);
-
-  ASSERT_EQ(BlobA->check_equal(BlobRes), true);
-  ASSERT_EQ(BlobACopy->check_equal(BlobRes), true);
-}
-
-TEST(MathBlobTest, TestASum) {
-  float A[10] = {};
-  for (int i = 0; i < 10; i++) {
-    A[i] = ((i % 3) -1) * i;
-  }
-
-  Blob<float> *BlobA = new Blob<float>(10);
-  BlobA->set_cpu_data(A);
-
-  float BlobRes = Asum<float>(*BlobA);
-  float res = cblas_sasum(10, A, 1) / 10;
-
-  ASSERT_EQ(BlobRes, res);
-}
-
-TEST(MathTest, TestGemmCPU) {
-  float A[3][2] = {};
-  float B[3][2] = {};
-  float C[2][2] = {};
-  for (int i = 0; i < 3; i++)
-    for (int j = 0; j < 2; j++) {
-      A[i][j] = i+j;
-      B[i][j] = i+j - i*j;
-    }
-  cpu_gemm(A[0], B[0], 2, 2, 3 , 1.0f, 0.0f, true, false, C[0]);
-  float D[2][2] = {};
-  for (int i = 0; i < 2; i++)
-    for (int j = 0; j < 2; j++) {
-      D[i][j] = 0;
-      for (int k = 0; k < 3; k++)
-        D[i][j] += A[k][i]*B[k][j];
-    }
-    for (int i = 0; i < 2; i++)
-      for (int j = 0; j < 2; j++) {
-      ASSERT_EQ(C[i][j], D[i][j]);
-    }
-}
-
-TEST(MathTest, TestGemvCPU) {
-  float A[4][3] = {};
-  float B[4]= {};
-  float C[3] = {};
-  float D[3] = {};
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      A[j][i] = i-j + i*j;
-    }
-  }
-
-  for (int i = 0; i < 4; i++)B[i] = i;
-  for (int i = 0; i < 3; i++)C[i] = 10;
-  cpu_gemv(A[0], B, 4, 3, 1.0f, 1.0f, true, C);
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      D[i] += A[j][i]*B[j];
-    }
-  }
-  for (int i = 0; i < 3; i++) {
-    ASSERT_EQ(C[i], D[i]+10);
-  }
-}
-
-
-/*
-TEST(MathTest, TestAxpyCPU) {
-  float A[4][3] = {};
-  float C[4][3] = {};
-  float B[3][4] = {};
-  float D[3][4] = {};
-
-  for (int i = 0; i < 4; i++) {
-    for (int j = 0; j < 3; j++) {
-      A[i][j] = i-j + i*j;
-      B[j][i] = i-j + i*j;
-      C[i][j] = A[i][j];
-      D[j][i] = B[j][i];
-    }
-  }
-
-  cpu_axpy(A[0], 12, 2.0f, B[0]);
-  for (int i = 0; i < 12; i++) {
-    D[i / 4][i % 4] += 2*C[i / 3][i % 3];
-  }
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      ASSERT_EQ(B[i][j], D[i][j]);
-    }
-  }
-}
-
-TEST(MathTest, TestEopCPU) {
-
-  float A[10] = {};
-  float B[10] = {};
-  float C[10] = {};
-  float O[10] = {};
-
-  for (int i = 0; i < 10; i++) {
-    A[i] = i;
-    B[i] = -i;
-    C[i] = i;
-  }
-  cpu_e_f<singa::op::Set>(5, 15.0f, O, O);
-  for (int i = 0; i < 5; i++) {
-    ASSERT_EQ(O[i]-15,0);
-  }
-  for (int i = 5; i < 10; i++) {
-    ASSERT_EQ(O[i],0);
-  }
-}
-*/
-
-#ifdef USE_GPU
-TEST(MathTest, TestGemmGPU) {
-  float A[3][2] = {};
-  float B[3][2] = {};
-  float C[2][2] = {};
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 2; j++) {
-      A[i][j] = i+j;
-      B[i][j] = i+j - i*j;
-    }
-  }
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-  float* C_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 3*2*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 3*2*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&C_gpu), 2*2*sizeof(float));
-
-  cudaMemcpy(A_gpu, A, 3*2*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu, B, 3*2*sizeof(float), cudaMemcpyHostToDevice);
-  auto context = Singleton<Context>::Instance();
-  context->SetupDevice(std::this_thread::get_id(), 0);
-  gpu_gemm<float>(context->cublas_handle(0), A_gpu, B_gpu, 2, 2, 3 , 1, 0, true,
-                  false, C_gpu);
-
-  cudaMemcpy(C, C_gpu, 2*2*sizeof(float), cudaMemcpyDeviceToHost);
-
-  float D[2][2] = {};
-  for (int i = 0; i < 2; i++) {
-    for (int j = 0; j < 2; j++) {
-      D[i][j] = 0;
-      for (int k = 0; k < 3; k++) {
-        D[i][j] += A[k][i]*B[k][j];
-      }
-    }
-  }
-
-  for (int i = 0; i < 2; i++) {
-    for (int j = 0; j < 2; j++) {
-      ASSERT_EQ(C[i][j], D[i][j]);
-    }
-  }
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-  cudaFree(C_gpu);
-}
-
-
-TEST(MathTest, TestGemvGPU) {
-  float A[4][3] = {};
-  float B[4]= {};
-  float C[3] = {};
-  float D[3] = {};
-
-  for (int i = 0; i < 4; i++) {
-    for (int j = 0; j < 3; j++) {
-      A[i][j] = i-j + i*j;
-    }
-  }
-
-  for (int i = 0; i < 4; i++) B[i] = i;
-  for (int i = 0; i < 3; i++) C[i] = 10;
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-  float* C_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 4*3*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 4*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&C_gpu), 3*sizeof(float));
-
-  cudaMemcpy(A_gpu, A, 4*3*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu, B, 4*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(C_gpu, C, 3*sizeof(float), cudaMemcpyHostToDevice);
-  auto context = Singleton<Context>::Instance();
-  context->SetupDevice(std::this_thread::get_id(), 0);
-  gpu_gemv<float>(context->cublas_handle(0), A_gpu, B_gpu, 4, 3, 1.0f, 1.0f,
-                  true, C_gpu);
-
-  cudaMemcpy(C, C_gpu, 3*sizeof(float), cudaMemcpyDeviceToHost);
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      D[i] += A[j][i]*B[j];
-    }
-  }
-
-  for (int i = 0; i < 3; i++) {
-    ASSERT_EQ(C[i], D[i]+10);
-  }
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-  cudaFree(C_gpu);
-}
-
-
-/*
-TEST(MathTest, TestAxpyGPU) {
-  float A[4][3] = {};
-  float C[4][3] = {};
-  float B[3][4] = {};
-  float D[3][4] = {};
-
-  for (int i = 0; i < 4; i++)
-  {
-    for (int j = 0; j < 3; j++)
-    {
-      A[i][j] = i-j + i*j;
-      B[j][i] = i-j + i*j;
-      C[i][j] = A[i][j];
-      D[j][i] = B[j][i];
-    }
-  }
-
-  float* A_gpu=NULL;
-  float* B_gpu=NULL;
-
-  cudaMalloc((void**)&A_gpu, 4*3*sizeof(float));
-  cudaMalloc((void**)&B_gpu, 3*4*sizeof(float));
-
-  cudaMemcpy(A_gpu,A,4*3*sizeof(float),cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu,B,3*4*sizeof(float),cudaMemcpyHostToDevice);
-
-  gpu_axpy<float>(A_gpu, 12, 2, B_gpu);
-
-  cudaMemcpy(A,A_gpu,4*3*sizeof(float),cudaMemcpyDeviceToHost);
-  cudaMemcpy(B,B_gpu,3*4*sizeof(float),cudaMemcpyDeviceToHost);
-
-  //for (int i = 0; i < 12; i++)D[0][i] += 2*C[0][i];
-
-  for (int i = 0; i < 4; i++)
-  {
-    for (int j = 0; j < 3; j++)
-    {
-      D[i][j] += C[i][j];
-      ASSERT_EQ(B[i][j],D[i][j]);
-    }
-  }
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-}
-*/
-
-
-TEST(MathTest, TestDotGPU) {
-  float A[12];
-  float B[12];
-  for (int i = 0; i < 12; i++) {
-    A[i] = i - 1;
-    B[i] = i + 1;
-  }
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 12*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 12*sizeof(float));
-
-  cudaMemcpy(A_gpu, A, 12*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu, B, 12*sizeof(float), cudaMemcpyHostToDevice);
-  auto context = Singleton<Context>::Instance();
-  context->SetupDevice(std::this_thread::get_id(), 0);
-  float gpu_ret = gpu_dot<float>(context->cublas_handle(0), 12, A_gpu, B_gpu);
-
-  float cpu_ret = 0.0f;
-  for (int i = 0; i < 12; i++) {
-    cpu_ret += A[i] * B[i];
-  }
-
-  ASSERT_EQ(gpu_ret, cpu_ret);
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-}
-
-TEST(MathTest, TestSingaSumRowGPU) {
-  float A[3][4];
-  float B[4];
-  float C[4];
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      // A[i][j] = i + j;
-      A[i][j] = 1.0f;
-    }
-  }
-
-  for (int i = 0; i < 4; i++) {
-    B[i] = 0.0f;
-    C[i] = 0.0f;
-  }
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 12*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 4*sizeof(float));
-  cudaMemcpy(A_gpu, A, 12*sizeof(float), cudaMemcpyHostToDevice);
-  singa_gpu_sum_row(A_gpu, B_gpu, 3, 4, 4);
-
-  cudaMemcpy(B, B_gpu, 4*sizeof(float), cudaMemcpyDeviceToHost);
-
-  for (int i = 0; i < 4; i++) {
-    for (int j = 0; j < 3; j++) {
-      C[i] += A[j][i];
-    }
-  }
-
-  for (int i = 0; i < 4; i++) {
-    ASSERT_EQ(B[i], C[i]);
-  }
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-}
-
-TEST(MathTest, TestSingaAddVecRowGPU) {
-  float A[3][4];
-  float B[4];
-  float C[3][4];
-  float D[3][4];
-
-  for (int i = 0; i < 4; i++) {
-    B[i] = i;
-  }
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      A[i][j] = i + j;
-      D[i][j] = A[i][j] + B[j];
-    }
-  }
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-  float* C_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 3*4*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 4*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&C_gpu), 3*4*sizeof(float));
-  cudaMemcpy(A_gpu, A, 3*4*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu, B, 4*sizeof(float), cudaMemcpyHostToDevice);
-
-  singa_gpu_add_vec_row(B_gpu, A_gpu, C_gpu, 3, 4, 4);
-
-  cudaMemcpy(C, C_gpu, 3*4*sizeof(float), cudaMemcpyDeviceToHost);
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      ASSERT_EQ(C[i][j], D[i][j]);
-    }
-  }
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-  cudaFree(C_gpu);
-}
-
-
-TEST(MathTest, TestSingaSetValueGPU) {
-  float A[3][4];
-  float* A_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 3*4*sizeof(float));
-
-  cudaMemcpy(A_gpu, A, 3*4*sizeof(float), cudaMemcpyHostToDevice);
-
-  singa_gpu_set_value(A_gpu, 4.0, 3*4);
-
-  cudaMemcpy(A, A_gpu, 3*4*sizeof(float), cudaMemcpyDeviceToHost);
-
-  for (int i = 0; i < 3; i++) {
-    for (int j = 0; j < 4; j++) {
-      ASSERT_EQ(A[i][j], 4.0f);
-    }
-  }
-
-  cudaFree(A_gpu);
-}
-
-
-TEST(MathTest, TestEopGPU) {
-  float A[10] = {};
-  float B[10] = {};
-
-  for (int i = 0; i < 10; i++) {
-    A[i] = i;
-    B[i] = -i;
-  }
-
-  float* A_gpu = NULL;
-  float* B_gpu = NULL;
-
-  cudaMalloc(reinterpret_cast<void**>(&A_gpu), 10*sizeof(float));
-  cudaMalloc(reinterpret_cast<void**>(&B_gpu), 10*sizeof(float));
-
-  cudaMemcpy(A_gpu, A, 10*sizeof(float), cudaMemcpyHostToDevice);
-  cudaMemcpy(B_gpu, B, 10*sizeof(float), cudaMemcpyHostToDevice);
-
-  gpu_e_f<singa::op::Sigmoid<float>, float>(10, A_gpu, B_gpu);
-
-  cudaFree(A_gpu);
-  cudaFree(B_gpu);
-}
-#endif  // USE_GPU
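
Each blob-level expectation above is built against the BLAS contract C = alpha*op(A)*op(B) + beta*C, where op() is selected per operand by the blob's transpose flag instead of an explicit argument; MMDot and MVDot are the alpha = 1, beta = 0 special cases, and the GPU tests route the same contract through cuBLAS. A plain-C++ reference kernel for auditing those expected values (square matrices for brevity; a sketch, not SINGA's implementation):

#include <cstdio>

// C = alpha * op(A) * op(B) + beta * C for n x n row-major matrices,
// with op(X) = X^T when the corresponding transpose flag is set, i.e.
// the same contract the TestGEMM expectations are built from.
void RefGemm(int n, float alpha, float beta,
             const float* A, bool transA,
             const float* B, bool transB, float* C) {
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j) {
      float acc = 0.0f;
      for (int k = 0; k < n; ++k) {
        float a = transA ? A[k * n + i] : A[i * n + k];
        float b = transB ? B[j * n + k] : B[k * n + j];
        acc += a * b;
      }
      C[i * n + j] = alpha * acc + beta * C[i * n + j];
    }
}

int main() {
  float A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8}, C[4] = {0, 0, 0, 0};
  RefGemm(2, 1.0f, 0.0f, A, /*transA=*/true, B, /*transB=*/false, C);
  std::printf("%g %g %g %g\n", C[0], C[1], C[2], C[3]);  // 26 30 38 44
}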

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_msg.cc
----------------------------------------------------------------------
diff --git a/src/test/test_msg.cc b/src/test/test_msg.cc
deleted file mode 100644
index db83b1c..0000000
--- a/src/test/test_msg.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "gtest/gtest.h"
-#include "singa/comm/msg.h"
-using namespace singa;
-TEST(MsgTest, AddrTest) {
-  int src_grp = 1, src_worker = 2;
-  int dst_grp = 0, dst_server = 1;
-  int src_addr = Addr(src_grp, src_worker, 0);
-  int dst_addr = Addr(dst_grp, dst_server, 1);
-  Msg msg(src_addr, dst_addr);
-  msg.set_trgt(123, -1);
-  ASSERT_EQ(AddrGrp(msg.src()), src_grp);
-  ASSERT_EQ(AddrID(msg.src()), src_worker);
-  ASSERT_EQ(AddrType(msg.src()), 0);
-
-  msg.SwapAddr();
-  ASSERT_EQ(AddrGrp(msg.src()), dst_grp);
-  ASSERT_EQ(AddrID(msg.src()), dst_server);
-  ASSERT_EQ(AddrType(msg.src()), 1);
-  ASSERT_EQ(msg.trgt_val(), 123);
-  ASSERT_EQ(msg.trgt_version(), -1);
-}
-
-TEST(MsgTest, AddFrameTest) {
-  int buf[5] = {1, 2, 3, 4, 5};
-  Msg msg;
-  msg.AddFrame("abcdefg", 7);
-  msg.AddFrame(buf, sizeof(int) * 5);
-
-  msg.FirstFrame();
-  char* str = msg.FrameStr();
-  ASSERT_STREQ(str, "abcdefg");
-  delete str;
-  ASSERT_EQ(msg.NextFrame(), true);
-  int *val = static_cast<int*>(msg.FrameData());
-  ASSERT_EQ(val[3], 4);
-  ASSERT_EQ(msg.NextFrame(), false);
-
-  msg.FirstFrame();
-  str = msg.FrameStr();
-  ASSERT_STREQ(str, "abcdefg");
-  msg.LastFrame();
-  val = static_cast<int*>(msg.FrameData());
-  ASSERT_EQ(val[2], 3);
-}
-
-TEST(MsgTest, AddFormatFrame) {
-  int x = 5;
-  Msg msg;
-  msg.AddFormatFrame("i", 12);
-  msg.AddFormatFrame("f", 10.f);
-  msg.AddFormatFrame("s", "abc");
-  msg.AddFormatFrame("p", &x);
-  msg.AddFormatFrame("isfp", 12, "abc", 10.f, &x);
-
-  msg.FirstFrame();
-  int y;
-  msg.ParseFormatFrame("i", &y);
-  ASSERT_EQ(y, 12);
-  ASSERT_EQ(msg.NextFrame(), true);
-
-  float z;
-  msg.ParseFormatFrame("f", &z);
-  ASSERT_EQ(z, 10.f);
-  ASSERT_EQ(msg.NextFrame(), true);
-
-  char buf[10];
-  msg.ParseFormatFrame("s", buf);
-  ASSERT_STREQ(buf, "abc");
-  ASSERT_EQ(msg.NextFrame(), true);
-
-  int *p;
-  msg.ParseFormatFrame("p", &p);
-  ASSERT_EQ(p, &x);
-  ASSERT_EQ(msg.NextFrame(), true);
-
-  msg.ParseFormatFrame("isfp", &y, buf, &z, &p);
-  ASSERT_EQ(y, 12);
-  ASSERT_STREQ(buf, "abc");
-  ASSERT_EQ(z, 10.f);
-  ASSERT_EQ(p, &x);
-}
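
AddrTest depends on Addr() packing a group id, a worker or server id, and a type flag into one int, with AddrGrp, AddrID and AddrType unpacking the fields and SwapAddr exchanging the endpoints. A bit-field sketch of that packing (the 16/14/2-bit split is an illustrative assumption, not the actual layout in singa/comm/msg.h):

#include <cassert>

// Pack (group, id, type) into one int: illustrative 16/14/2-bit split.
inline int Addr(int grp, int id, int type) {
  return (grp << 16) | (id << 2) | type;
}
inline int AddrGrp(int addr)  { return addr >> 16; }
inline int AddrID(int addr)   { return (addr >> 2) & 0x3FFF; }
inline int AddrType(int addr) { return addr & 0x3; }

int main() {
  int a = Addr(1, 2, 0);  // worker 2 in group 1, type 0 (cf. AddrTest)
  assert(AddrGrp(a) == 1 && AddrID(a) == 2 && AddrType(a) == 0);
}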

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_neuralnet.cc
----------------------------------------------------------------------
diff --git a/src/test/test_neuralnet.cc b/src/test/test_neuralnet.cc
deleted file mode 100644
index 3ab197b..0000000
--- a/src/test/test_neuralnet.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "gtest/gtest.h"
-#include "singa/driver.h"
-#include "singa/neuralnet/connection_layer.h"
-#include "singa/neuralnet/neuralnet.h"
-#include "singa/neuralnet/neuron_layer.h"
-
-using namespace singa;
-
-const int N = 10;  // size of dim 0
-const int M = 20;  // size of dim 1
-const int K = 2;  // size of partitions
-
-TEST(NeuralNet, RegisterLayers) {
-  Driver driver;
-  driver.RegisterLayer<DummyLayer, int>(kDummy);
-  driver.RegisterLayer<SliceLayer, int>(kSlice);
-  driver.RegisterLayer<SplitLayer, int>(kSplit);
-  driver.RegisterLayer<ConcateLayer, int>(kConcate);
-  driver.RegisterLayer<BridgeSrcLayer, int>(kBridgeSrc);
-  driver.RegisterLayer<BridgeDstLayer, int>(kBridgeDst);
-}
-
-TEST(NeuralNet, AddModelSplitLayers) {
-  NetProto proto;
-  // use dummy as input layer
-  LayerProto* proto_in = proto.add_layer();
-  proto_in->set_name("dummy_input");
-  proto_in->set_type(kDummy);
-  proto_in->mutable_dummy_conf()->set_input(true);
-  proto_in->mutable_dummy_conf()->add_shape(N);
-  proto_in->mutable_dummy_conf()->add_shape(M);
-  // use 2 dummy neuron layers
-  for (int i = 0; i < 2; ++i) {
-    LayerProto* proto_neuron = proto.add_layer();
-    proto_neuron->set_name("dummy_neuron_" + std::to_string(i));
-    proto_neuron->set_type(kDummy);
-    proto_neuron->add_srclayers("dummy_input");
-  }
-  // use dummy as output layer
-  for (int i = 0; i < 2; ++i) {
-    LayerProto* proto_out = proto.add_layer();
-    proto_out->set_name("dummy_output" + std::to_string(i));
-    proto_out->set_type(kDummy);
-    proto_out->mutable_dummy_conf()->set_output(true);
-    proto_out->add_srclayers("dummy_neuron_" + std::to_string(i));
-  }
-  NeuralNet::Create(proto, kTrain, K);
-}
-
-TEST(NeuralNet, DirectConnection) {
-  NetProto proto;
-  // use dummy as input layer
-  LayerProto* proto_in = proto.add_layer();
-  proto_in->set_name("dummy_input");
-  proto_in->set_type(kDummy);
-  proto_in->mutable_dummy_conf()->set_input(true);
-  proto_in->mutable_dummy_conf()->add_shape(N);
-  proto_in->mutable_dummy_conf()->add_shape(M);
-  // use dummy neuron layer
-  LayerProto* proto_neuron = proto.add_layer();
-  proto_neuron->set_name("dummy_neuron");
-  proto_neuron->set_type(kDummy);
-  proto_neuron->add_srclayers("dummy_input");
-  // use dummy as output layer
-  LayerProto* proto_out = proto.add_layer();
-  proto_out->set_name("dummy_output");
-  proto_out->set_type(kDummy);
-  proto_out->mutable_dummy_conf()->set_output(true);
-  proto_out->add_srclayers("dummy_neuron");
-  NeuralNet::Create(proto, kTrain, K);
-}
-
-TEST(NeuralNet, SliceConcate) {
-  NetProto proto;
-  // use dummy as input layer
-  LayerProto* proto_in = proto.add_layer();
-  proto_in->set_name("dummy_input");
-  proto_in->set_type(kDummy);
-  proto_in->mutable_dummy_conf()->set_input(true);
-  proto_in->mutable_dummy_conf()->add_shape(N);
-  proto_in->mutable_dummy_conf()->add_shape(M);
-  // use dummy neuron layer
-  LayerProto* proto_neuron = proto.add_layer();
-  proto_neuron->set_name("dummy_neuron");
-  proto_neuron->set_type(kDummy);
-  proto_neuron->add_srclayers("dummy_input");
-  // use dummy as output layer
-  LayerProto* proto_out = proto.add_layer();
-  proto_out->set_name("dummy_output");
-  proto_out->set_type(kDummy);
-  proto_out->set_partition_dim(1);
-  proto_out->mutable_dummy_conf()->set_output(true);
-  proto_out->add_srclayers("dummy_neuron");
-  NeuralNet::Create(proto, kTrain, K);
-}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_paramslicer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_paramslicer.cc b/src/test/test_paramslicer.cc
deleted file mode 100644
index bc7dedd..0000000
--- a/src/test/test_paramslicer.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "singa/utils/param.h"
-#include "gtest/gtest.h"
-
-
-using namespace singa;
-
-const int param_size[] = {2400, 32, 25600, 32, 51200, 64, 57600, 10};
-
-/*
-class ParamSlicerTest : public ::testing::Test {
-  public:
-    ParamSlicerTest() {
-      ParamProto proto;
-      int nparams=sizeof(param_size)/sizeof(int);
-      for(int i=0;i<nparams;i++){
-        vector<int> shape{param_size[i]};
-        auto param=std::make_shared<Param>();
-        param->Setup(proto, shape);
-        param->set_id(i);
-        params.push_back(param);
-      }
-    }
-  protected:
-    vector<shared_ptr<Param>> params;
-};
-
-// all params are stored in one box, no need to split
-TEST_F(ParamSlicerTest, OneBox) {
-  int nparams = sizeof(param_size) / sizeof(int);
-  ParamSlicer slicer;
-  int num = 1;
-  auto slices = slicer.Slice(num, params);
-  ASSERT_EQ(slices.size(), nparams);
-  ASSERT_EQ(slicer.Get(1).size(), 1);
-  ASSERT_EQ(slicer.Get(2).size(), 1);
-  ASSERT_EQ(slicer.Get(nparams - 1).back(), slices.size() - 1);
-}
-
-// there are multiple boxes
-TEST_F(ParamSlicerTest, MultipleBox) {
-  int nparams = sizeof(param_size) / sizeof(int);
-  ParamSlicer slicer;
-  int num = 4;
-  auto slices = slicer.Slice(num, params);
-  ASSERT_EQ(slicer.Get(1).size(), 1);
-  ASSERT_EQ(slicer.Get(3).size(), 1);
-  ASSERT_EQ(slicer.Get(nparams - 1).back(), slices.size() - 1);
-}
-*/
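
The ParamSlicer test above was already commented out before the file was removed, but its assertions still document the intended contract: Slice(num, params) cuts a parameter list into slices for num boxes, Get(i) returns the slice indices belonging to the i-th param, and with a single box no param is split, so the number of slices equals the number of params. A hedged sketch of that usage, reusing the names from the commented-out test (this API existed only behind the comment, so treat it as illustrative):

  // Illustrative only: ParamSlicer::Slice/Get follow the commented-out
  // test above, not a maintained public API. vector and shared_ptr are
  // used unqualified, as in the original test.
  #include <iostream>
  #include "singa/utils/param.h"
  using namespace singa;

  void ReportSlices(const vector<shared_ptr<Param>>& params, int num_boxes) {
    ParamSlicer slicer;
    auto slices = slicer.Slice(num_boxes, params);   // global slice list
    for (size_t i = 0; i < params.size(); ++i)
      std::cout << "param " << i << " -> " << slicer.Get(i).size()
                << " slice(s) of " << slices.size() << " total\n";
  }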

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_record_input_layer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_record_input_layer.cc b/src/test/test_record_input_layer.cc
deleted file mode 100644
index 64e1ad4..0000000
--- a/src/test/test_record_input_layer.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "singa/neuralnet/input_layer.h"
-#include "singa/proto/job.pb.h"
-#include "singa/proto/common.pb.h"
-
-class RecordInputLayerTest : public ::testing::Test {
- protected:
-  virtual void SetUp() {
-    std::string path ="src/test/test.bin";
-    auto* store = singa::io::CreateStore("kvfile");
-    store->Open(path, singa::io::kCreate);
-    {
-    singa::RecordProto image;
-    image.add_data(3.2);
-    image.add_data(1);
-    image.add_data(14.1);
-    image.set_label(12);
-    std::string val;
-    image.SerializeToString(&val);
-    store->Write("0", val);
-    }
-
-    {
-    singa::SingleLabelImageRecord image;
-    image.add_data(0.2);
-    image.add_data(0);
-    image.add_data(1.1);
-    image.set_label(2);
-    std::string val;
-    image.SerializeToString(&val);
-    store->Write("1", val);
-    }
-
-    {
-    singa::SingleLabelImageRecord image;
-    image.add_data(2.2);
-    image.add_data(1);
-    image.add_data(4.1);
-    image.set_label(1);
-    std::string val;
-    image.SerializeToString(&val);
-    store->Write("2", val);
-    }
-    store->Flush();
-    store->Close();
-
-    auto conf = image_conf.mutable_store_conf();
-    conf->set_path(path);
-    conf->add_batchsize(2);
-    conf->add_shape(3);
-    conf->set_backend("kvfile");
-  }
-  singa::LayerProto image_conf;
-};
-
-TEST_F(RecordInputLayerTest, Setup) {
-  singa::RecordInputLayer layer;
-  layer.Setup(image_conf, std::vector<singa::Layer*>{});
-  EXPECT_EQ(2, static_cast<int>(layer.aux_data().size()));
-  EXPECT_EQ(6, layer.data(nullptr).count());
-}
-
-TEST_F(RecordInputLayerTest, ComputeFeature) {
-  singa::RecordInputLayer image;
-  image.Setup(image_conf, std::vector<singa::Layer*>{});
-  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-
-  EXPECT_EQ(12, image.aux_data()[0]);
-  EXPECT_EQ(2, image.aux_data()[1]);
-  auto data = image.data(nullptr);
-  EXPECT_EQ(3.2f, data.cpu_data()[0]);
-  EXPECT_EQ(14.1f, data.cpu_data()[2]);
-  EXPECT_EQ(0.2f, data.cpu_data()[3]);
-  EXPECT_EQ(1.1f, data.cpu_data()[5]);
-}
-TEST_F(RecordInputLayerTest, ComputeFeatureDeploy) {
-  singa::RecordInputLayer image;
-  image.Setup(image_conf, std::vector<singa::Layer*>{});
-  image.ComputeFeature(singa::kDeploy, std::vector<singa::Layer*>{});
-
-  auto data = image.data(nullptr);
-  EXPECT_EQ(3.2f, data.cpu_data()[0]);
-  EXPECT_EQ(14.1f, data.cpu_data()[2]);
-  EXPECT_EQ(0.2f, data.cpu_data()[3]);
-  EXPECT_EQ(1.1f, data.cpu_data()[5]);
-}
-
-TEST_F(RecordInputLayerTest, SeekToFirst) {
-  singa::RecordInputLayer image;
-  image.Setup(image_conf, std::vector<singa::Layer*>{});
-  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
-
-  auto data = image.data(nullptr);
-  EXPECT_EQ(2.2f, data.cpu_data()[0]);
-  EXPECT_EQ(4.1f, data.cpu_data()[2]);
-  EXPECT_EQ(3.2f, data.cpu_data()[3]);
-  EXPECT_EQ(14.1f, data.cpu_data()[5]);
-}
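
The fixture above doubles as a usage example for the kvfile plus RecordInputLayer pipeline: serialize one record per key (the test mixes RecordProto and SingleLabelImageRecord, treating the two message types as wire-compatible), then point a store_conf at the resulting file. A condensed sketch, assuming the same headers the deleted test itself included:

  // Condensed from the fixture above; the path and sizes are the test's own.
  singa::LayerProto conf;
  auto* store_conf = conf.mutable_store_conf();
  store_conf->set_path("src/test/test.bin");
  store_conf->set_backend("kvfile");
  store_conf->add_batchsize(2);   // records per mini-batch
  store_conf->add_shape(3);       // feature values per record

  singa::RecordInputLayer layer;
  layer.Setup(conf, std::vector<singa::Layer*>{});
  layer.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
  // labels arrive in layer.aux_data(), features in layer.data(nullptr)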

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_store.cc
----------------------------------------------------------------------
diff --git a/src/test/test_store.cc b/src/test/test_store.cc
deleted file mode 100644
index d8a8904..0000000
--- a/src/test/test_store.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-#include <string>
-#include "gtest/gtest.h"
-#include "singa/io/store.h"
-
-TEST(TextFileStore, Open) {
-  auto store = singa::io::CreateStore("textfile");
-  EXPECT_EQ(store->Open("src/test/store.txt", singa::io::kCreate), true);
-  store->Close();
-  EXPECT_EQ(store->Open("src/test/store.txt", singa::io::kRead), true);
-  store->Close();
-}
-
-TEST(TextFileStore, Write) {
-  auto store = singa::io::CreateStore("textfile");
-  store->Open("src/test/store.txt", singa::io::kCreate);
-  store->Write("001", "first tuple");
-  store->Write("002", "second tuple");
-  store->Flush();
-  store->Write("003", "third tuple");
-  store->Close();
-}
-
-TEST(TextFileStore, Read) {
-  auto store = singa::io::CreateStore("textfile");
-  EXPECT_EQ(store->Open("src/test/store.txt", singa::io::kRead), true);
-  std::string key, value;
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(key, "0");
-  EXPECT_EQ(value, "first tuple");
-
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(store->Read(&key, &value), false);
-  store->SeekToFirst();
-
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(key, "0");
-  EXPECT_EQ(value, "first tuple");
-}
-TEST(KVFileStore, Open) {
-  auto store = singa::io::CreateStore("kvfile");
-  EXPECT_EQ(store->Open("src/test/store.bin", singa::io::kCreate), true);
-  store->Close();
-  EXPECT_EQ(store->Open("src/test/store.bin", singa::io::kRead), true);
-  store->Close();
-}
-TEST(KVFileStore, Write) {
-  auto store = singa::io::CreateStore("kvfile");
-  store->Open("src/test/store.bin", singa::io::kCreate);
-  store->Write("001", "first tuple");
-  store->Write("002", "second tuple");
-  store->Flush();
-  store->Write("003", "third tuple");
-  store->Close();
-}
-TEST(KVFileStore, Read) {
-  auto store = singa::io::CreateStore("kvfile");
-  store->Open("src/test/store.bin", singa::io::kRead);
-  std::string key, value;
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(key, "001");
-  EXPECT_EQ(value, "first tuple");
-
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(store->Read(&key, &value), false);
-  store->SeekToFirst();
-
-  EXPECT_EQ(store->Read(&key, &value), true);
-  EXPECT_EQ(key, "001");
-  EXPECT_EQ(value, "first tuple");
-}
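
Both store tests pin down the same small contract: Open/Write/Flush/Close on the producer side; Open, Read-until-false, and SeekToFirst on the consumer side. They also pin down one asymmetry: a textfile store does not persist keys, so Read hands back the line number ("0") rather than the key that was written ("001"), whereas a kvfile store returns keys verbatim. A minimal round-trip sketch under those assumptions (the /tmp path is a placeholder):

  #include <iostream>
  #include <string>
  #include "singa/io/store.h"

  int main() {
    auto store = singa::io::CreateStore("kvfile");
    store->Open("/tmp/demo.bin", singa::io::kCreate);  // placeholder path
    store->Write("001", "first tuple");
    store->Flush();
    store->Close();

    store->Open("/tmp/demo.bin", singa::io::kRead);
    std::string key, value;
    while (store->Read(&key, &value))      // returns false at end of data
      std::cout << key << " -> " << value << "\n";
    store->SeekToFirst();                  // rewind for another pass
    store->Close();
    return 0;
  }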

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/test/test_unrolling.cc
----------------------------------------------------------------------
diff --git a/src/test/test_unrolling.cc b/src/test/test_unrolling.cc
deleted file mode 100644
index 7965882..0000000
--- a/src/test/test_unrolling.cc
+++ /dev/null
@@ -1,373 +0,0 @@
-/************************************************************
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- *************************************************************/
-#include <string>
-#include <vector>
-#include <fstream>
-#include <iostream>
-using namespace std;
-
-#include "gtest/gtest.h"
-#include "singa/neuralnet/input_layer.h"
-#include "singa/neuralnet/neuron_layer.h"
-#include "singa/neuralnet/neuralnet.h"
-#include "singa/neuralnet/connection_layer.h"
-#include "singa/driver.h"
-#include "singa/proto/job.pb.h"
-#include "singa/utils/common.h"
-
-using namespace singa;
-
-class UnrollingTest: public ::testing::Test {
-protected:
-	virtual void SetUp() {
-		NetProto* net_conf1 = job_conf1.mutable_neuralnet();
-
-		LayerProto* data_layer1 = net_conf1->add_layer();
-		data_layer1->set_name("data");
-		data_layer1->set_type(kRecordInput);
-
-		LayerProto* embedding_layer1 = net_conf1->add_layer();
-		embedding_layer1->set_name("embedding");
-		embedding_layer1->set_type(kDummy);
-		embedding_layer1->add_srclayers("data");
-		embedding_layer1->set_unroll_len(3);
-		embedding_layer1->add_unroll_conn_type(kUnrollOneToAll);
-
-		LayerProto* gru_layer1 = net_conf1->add_layer();
-		gru_layer1->set_name("gru");
-		gru_layer1->set_type(kGRU);
-		gru_layer1->add_srclayers("embedding");
-		gru_layer1->mutable_gru_conf()->set_dim_hidden(20);
-		gru_layer1->add_param()->set_name("w_z_hx");
-		gru_layer1->add_param()->set_name("w_r_hx");
-		gru_layer1->add_param()->set_name("w_c_hx");
-		gru_layer1->add_param()->set_name("w_z_hh");
-		gru_layer1->add_param()->set_name("w_r_hh");
-		gru_layer1->add_param()->set_name("w_c_hh");
-		gru_layer1->set_unroll_len(3);
-		gru_layer1->add_unroll_conn_type(kUnrollOneToOne);
-
-		LayerProto* out_layer1 = net_conf1->add_layer();
-		out_layer1->set_name("out");
-		out_layer1->set_type(kInnerProduct);
-		out_layer1->add_srclayers("gru");
-		out_layer1->mutable_innerproduct_conf()->set_num_output(100);
-		out_layer1->add_param()->set_name("w");
-		out_layer1->add_param()->set_name("b");
-		out_layer1->set_unroll_len(3);
-		out_layer1->add_unroll_conn_type(kUnrollOneToOne);
-
-		LayerProto* loss_layer1 = net_conf1->add_layer();
-		loss_layer1->set_name("loss");
-		loss_layer1->set_type(kSoftmaxLoss);
-		loss_layer1->add_srclayers("out");
-		loss_layer1->add_srclayers("data");
-		loss_layer1->set_unroll_len(3);
-		loss_layer1->add_unroll_conn_type(kUnrollOneToOne);
-		loss_layer1->add_unroll_conn_type(kUnrollOneToAll);
-
-		/*
-		 * Initialize job conf 2
-		NetProto* net_conf2 = job_conf2.mutable_neuralnet();
-
-		LayerProto* data_layer2 = net_conf2->add_layer();
-		data_layer2->set_name("data");
-		data_layer2->set_type(kRecordInput);
-
-		LayerProto* embedding_layer2 = net_conf2->add_layer();
-		embedding_layer2->set_name("embedding");
-		embedding_layer2->set_type(kDummy);
-		embedding_layer2->add_srclayers("data");
-		embedding_layer2->add_srclayers("softmax");
-		embedding_layer2->set_unroll_len(3);
-		embedding_layer2->add_unroll_conn_type(kUnrollOneToAll);
-		embedding_layer2->add_shift(0);
-		embedding_layer2->add_unroll_conn_type(kUnrollOneToOne);
-		embedding_layer2->add_shift(1);
-
-		LayerProto* gru_layer2 = net_conf2->add_layer();
-		gru_layer2->set_name("gru");
-		gru_layer2->set_type(kGRU);
-		gru_layer2->add_srclayers("embedding");
-		gru_layer2->mutable_gru_conf()->set_dim_hidden(20);
-		gru_layer2->mutable_gru_conf()->set_bias_term(false);
-		gru_layer2->add_param()->set_name("w_z_hx");
-		gru_layer2->add_param()->set_name("w_r_hx");
-		gru_layer2->add_param()->set_name("w_c_hx");
-		gru_layer2->add_param()->set_name("w_z_hh");
-		gru_layer2->add_param()->set_name("w_r_hh");
-		gru_layer2->add_param()->set_name("w_c_hh");
-		gru_layer2->set_unroll_len(3);
-		gru_layer2->add_unroll_conn_type(kUnrollOneToOne);
-		gru_layer2->add_shift(0);
-
-		LayerProto* out_layer2 = net_conf2->add_layer();
-		out_layer2->set_name("out");
-		out_layer2->set_type(kInnerProduct);
-		out_layer2->add_srclayers("gru");
-		out_layer2->mutable_innerproduct_conf()->set_num_output(100);
-		out_layer2->add_param()->set_name("w");
-		out_layer2->add_param()->set_name("b");
-		out_layer2->set_unroll_len(3);
-		out_layer2->add_unroll_conn_type(kUnrollOneToOne);
-		out_layer2->add_shift(0);
-
-		LayerProto* softmax_layer2 = net_conf2->add_layer();
-		softmax_layer2->set_name("softmax");
-		softmax_layer2->set_type(kSoftmax);
-		softmax_layer2->add_srclayers("out");
-		softmax_layer2->set_unroll_len(3);
-		softmax_layer2->add_unroll_conn_type(kUnrollOneToOne);
-		softmax_layer2->add_shift(0);
-
-		LayerProto* loss_layer2 = net_conf2->add_layer();
-		loss_layer2->set_name("loss");
-		loss_layer2->set_type(kSoftmaxLoss);
-		loss_layer2->add_srclayers("softmax");
-		loss_layer2->add_srclayers("data");
-		loss_layer2->set_unroll_len(3);
-		loss_layer2->add_unroll_conn_type(kUnrollOneToOne);
-		loss_layer2->add_shift(0);
-		loss_layer2->add_unroll_conn_type(kUnrollOneToAll);
-		loss_layer2->add_shift(0);
-		 */
-	}
-
-	singa::JobProto job_conf1;
-	singa::JobProto job_conf2;
-};
-
-TEST_F(UnrollingTest, GRULanguageModelTrain) {
-	NetProto net;
-	net.CopyFrom(job_conf1.neuralnet());
-	NetProto unrolled_net = NeuralNet::Unrolling(net);
-	EXPECT_EQ("0#data", unrolled_net.layer(0).name());
-
-	EXPECT_EQ("0#embedding", unrolled_net.layer(1).name());
-	EXPECT_EQ(1, unrolled_net.layer(1).srclayers_size());
-	EXPECT_EQ("0#data", unrolled_net.layer(1).srclayers(0));
-
-	EXPECT_EQ("1#embedding", unrolled_net.layer(2).name());
-	EXPECT_EQ(1, unrolled_net.layer(2).srclayers_size());
-	EXPECT_EQ("0#data", unrolled_net.layer(2).srclayers(0));
-
-	EXPECT_EQ("2#embedding", unrolled_net.layer(3).name());
-	EXPECT_EQ(1, unrolled_net.layer(3).srclayers_size());
-	EXPECT_EQ("0#data", unrolled_net.layer(3).srclayers(0));
-
-	EXPECT_EQ("0#gru", unrolled_net.layer(4).name());
-	EXPECT_EQ(1, unrolled_net.layer(4).srclayers_size());
-	EXPECT_EQ("0#embedding", unrolled_net.layer(4).srclayers(0));
-	EXPECT_EQ("0#w_z_hx", unrolled_net.layer(4).param(0).name());
-	EXPECT_EQ("0#w_r_hx", unrolled_net.layer(4).param(1).name());
-	EXPECT_EQ("0#w_c_hx", unrolled_net.layer(4).param(2).name());
-	EXPECT_EQ("0#w_z_hh", unrolled_net.layer(4).param(3).name());
-	EXPECT_EQ("0#w_r_hh", unrolled_net.layer(4).param(4).name());
-	EXPECT_EQ("0#w_c_hh", unrolled_net.layer(4).param(5).name());
-
-	EXPECT_EQ("1#gru", unrolled_net.layer(5).name());
-	EXPECT_EQ(2, unrolled_net.layer(5).srclayers_size());
-	EXPECT_EQ("1#embedding", unrolled_net.layer(5).srclayers(0));
-	EXPECT_EQ("0#gru", unrolled_net.layer(5).srclayers(1));
-	EXPECT_EQ("1#w_z_hx", unrolled_net.layer(5).param(0).name());
-	EXPECT_EQ("0#w_z_hx", unrolled_net.layer(5).param(0).share_from());
-	EXPECT_EQ("1#w_r_hx", unrolled_net.layer(5).param(1).name());
-	EXPECT_EQ("0#w_r_hx", unrolled_net.layer(5).param(1).share_from());
-	EXPECT_EQ("1#w_c_hx", unrolled_net.layer(5).param(2).name());
-	EXPECT_EQ("0#w_c_hx", unrolled_net.layer(5).param(2).share_from());
-	EXPECT_EQ("1#w_z_hh", unrolled_net.layer(5).param(3).name());
-	EXPECT_EQ("0#w_z_hh", unrolled_net.layer(5).param(3).share_from());
-	EXPECT_EQ("1#w_r_hh", unrolled_net.layer(5).param(4).name());
-	EXPECT_EQ("0#w_r_hh", unrolled_net.layer(5).param(4).share_from());
-	EXPECT_EQ("1#w_c_hh", unrolled_net.layer(5).param(5).name());
-	EXPECT_EQ("0#w_c_hh", unrolled_net.layer(5).param(5).share_from());
-
-	EXPECT_EQ("2#gru", unrolled_net.layer(6).name());
-	EXPECT_EQ(2, unrolled_net.layer(6).srclayers_size());
-	EXPECT_EQ("2#embedding", unrolled_net.layer(6).srclayers(0));
-	EXPECT_EQ("1#gru", unrolled_net.layer(6).srclayers(1));
-	EXPECT_EQ("2#w_z_hx", unrolled_net.layer(6).param(0).name());
-	EXPECT_EQ("0#w_z_hx", unrolled_net.layer(6).param(0).share_from());
-	EXPECT_EQ("2#w_r_hx", unrolled_net.layer(6).param(1).name());
-	EXPECT_EQ("0#w_r_hx", unrolled_net.layer(6).param(1).share_from());
-	EXPECT_EQ("2#w_c_hx", unrolled_net.layer(6).param(2).name());
-	EXPECT_EQ("0#w_c_hx", unrolled_net.layer(6).param(2).share_from());
-	EXPECT_EQ("2#w_z_hh", unrolled_net.layer(6).param(3).name());
-	EXPECT_EQ("0#w_z_hh", unrolled_net.layer(6).param(3).share_from());
-	EXPECT_EQ("2#w_r_hh", unrolled_net.layer(6).param(4).name());
-	EXPECT_EQ("0#w_r_hh", unrolled_net.layer(6).param(4).share_from());
-	EXPECT_EQ("2#w_c_hh", unrolled_net.layer(6).param(5).name());
-	EXPECT_EQ("0#w_c_hh", unrolled_net.layer(6).param(5).share_from());
-
-	EXPECT_EQ("0#out", unrolled_net.layer(7).name());
-	EXPECT_EQ(1, unrolled_net.layer(7).srclayers_size());
-	EXPECT_EQ("0#gru", unrolled_net.layer(7).srclayers(0));
-	EXPECT_EQ("0#w", unrolled_net.layer(7).param(0).name());
-	EXPECT_EQ("0#b", unrolled_net.layer(7).param(1).name());
-
-	EXPECT_EQ("1#out", unrolled_net.layer(8).name());
-	EXPECT_EQ(1, unrolled_net.layer(8).srclayers_size());
-	EXPECT_EQ("1#gru", unrolled_net.layer(8).srclayers(0));
-	EXPECT_EQ("1#w", unrolled_net.layer(8).param(0).name());
-	EXPECT_EQ("0#w", unrolled_net.layer(8).param(0).share_from());
-	EXPECT_EQ("1#b", unrolled_net.layer(8).param(1).name());
-	EXPECT_EQ("0#b", unrolled_net.layer(8).param(1).share_from());
-
-	EXPECT_EQ("2#out", unrolled_net.layer(9).name());
-	EXPECT_EQ(1, unrolled_net.layer(9).srclayers_size());
-	EXPECT_EQ("2#gru", unrolled_net.layer(9).srclayers(0));
-	EXPECT_EQ("2#w", unrolled_net.layer(9).param(0).name());
-	EXPECT_EQ("0#w", unrolled_net.layer(9).param(0).share_from());
-	EXPECT_EQ("2#b", unrolled_net.layer(9).param(1).name());
-	EXPECT_EQ("0#b", unrolled_net.layer(9).param(1).share_from());
-
-	EXPECT_EQ("0#loss", unrolled_net.layer(10).name());
-	EXPECT_EQ(2, unrolled_net.layer(10).srclayers_size());
-	EXPECT_EQ("0#out", unrolled_net.layer(10).srclayers(0));
-	EXPECT_EQ("0#data", unrolled_net.layer(10).srclayers(1));
-
-	EXPECT_EQ("1#loss", unrolled_net.layer(11).name());
-	EXPECT_EQ(2, unrolled_net.layer(11).srclayers_size());
-	EXPECT_EQ("1#out", unrolled_net.layer(11).srclayers(0));
-	EXPECT_EQ("0#data", unrolled_net.layer(11).srclayers(1));
-
-	EXPECT_EQ("2#loss", unrolled_net.layer(12).name());
-	EXPECT_EQ(2, unrolled_net.layer(12).srclayers_size());
-	EXPECT_EQ("2#out", unrolled_net.layer(12).srclayers(0));
-	EXPECT_EQ("0#data", unrolled_net.layer(12).srclayers(1));
-}
-
-/*
-TEST_F(UnrollingTest, GRULanguageModelTest) {
-	NetProto net;
-	net.CopyFrom(job_conf2.neuralnet());
-	NetProto unrolled_net = NeuralNet::Unrolling(net);
-
-	EXPECT_EQ("data", unrolled_net.layer(0).name());
-
-	EXPECT_EQ("0#embedding", unrolled_net.layer(1).name());
-	EXPECT_EQ(1, unrolled_net.layer(1).srclayers_size());
-	EXPECT_EQ("data", unrolled_net.layer(1).srclayers(0));
-
-	EXPECT_EQ("1#embedding", unrolled_net.layer(2).name());
-	EXPECT_EQ(2, unrolled_net.layer(2).srclayers_size());
-	EXPECT_EQ("data", unrolled_net.layer(2).srclayers(0));
-	EXPECT_EQ("0#softmax", unrolled_net.layer(2).srclayers(1));
-
-	EXPECT_EQ("2#embedding", unrolled_net.layer(3).name());
-	EXPECT_EQ(2, unrolled_net.layer(3).srclayers_size());
-	EXPECT_EQ("data", unrolled_net.layer(3).srclayers(0));
-	EXPECT_EQ("1#softmax", unrolled_net.layer(3).srclayers(1));
-
-	EXPECT_EQ("0#gru", unrolled_net.layer(4).name());
-	EXPECT_EQ(1, unrolled_net.layer(4).srclayers_size());
-	EXPECT_EQ("0#embedding", unrolled_net.layer(4).srclayers(0));
-	EXPECT_EQ("w_z_hx", unrolled_net.layer(4).param(0).name());
-	EXPECT_EQ("w_r_hx", unrolled_net.layer(4).param(1).name());
-	EXPECT_EQ("w_c_hx", unrolled_net.layer(4).param(2).name());
-	EXPECT_EQ("w_z_hh", unrolled_net.layer(4).param(3).name());
-	EXPECT_EQ("w_r_hh", unrolled_net.layer(4).param(4).name());
-	EXPECT_EQ("w_c_hh", unrolled_net.layer(4).param(5).name());
-
-	EXPECT_EQ("1#gru", unrolled_net.layer(5).name());
-	EXPECT_EQ(2, unrolled_net.layer(5).srclayers_size());
-	EXPECT_EQ("0#gru", unrolled_net.layer(5).srclayers(0));
-	EXPECT_EQ("1#embedding", unrolled_net.layer(5).srclayers(1));
-	EXPECT_EQ("1#w_z_hx", unrolled_net.layer(5).param(0).name());
-	EXPECT_EQ("w_z_hx", unrolled_net.layer(5).param(0).share_from());
-	EXPECT_EQ("1#w_r_hx", unrolled_net.layer(5).param(1).name());
-	EXPECT_EQ("w_r_hx", unrolled_net.layer(5).param(1).share_from());
-	EXPECT_EQ("1#w_c_hx", unrolled_net.layer(5).param(2).name());
-	EXPECT_EQ("w_c_hx", unrolled_net.layer(5).param(2).share_from());
-	EXPECT_EQ("1#w_z_hh", unrolled_net.layer(5).param(3).name());
-	EXPECT_EQ("w_z_hh", unrolled_net.layer(5).param(3).share_from());
-	EXPECT_EQ("1#w_r_hh", unrolled_net.layer(5).param(4).name());
-	EXPECT_EQ("w_r_hh", unrolled_net.layer(5).param(4).share_from());
-	EXPECT_EQ("1#w_c_hh", unrolled_net.layer(5).param(5).name());
-	EXPECT_EQ("w_c_hh", unrolled_net.layer(5).param(5).share_from());
-
-	EXPECT_EQ("2#gru_2", unrolled_net.layer(6).name());
-	EXPECT_EQ(2, unrolled_net.layer(6).srclayers_size());
-	EXPECT_EQ("1#gru", unrolled_net.layer(6).srclayers(0));
-	EXPECT_EQ("2#embedding", unrolled_net.layer(6).srclayers(1));
-	EXPECT_EQ("2#w_z_hx", unrolled_net.layer(6).param(0).name());
-	EXPECT_EQ("w_z_hx", unrolled_net.layer(6).param(0).share_from());
-	EXPECT_EQ("2#w_r_hx", unrolled_net.layer(6).param(1).name());
-	EXPECT_EQ("w_r_hx", unrolled_net.layer(6).param(1).share_from());
-	EXPECT_EQ("2#w_c_hx", unrolled_net.layer(6).param(2).name());
-	EXPECT_EQ("w_c_hx", unrolled_net.layer(6).param(2).share_from());
-	EXPECT_EQ("2#w_z_hh", unrolled_net.layer(6).param(3).name());
-	EXPECT_EQ("w_z_hh", unrolled_net.layer(6).param(3).share_from());
-	EXPECT_EQ("2#w_r_hh", unrolled_net.layer(6).param(4).name());
-	EXPECT_EQ("w_r_hh", unrolled_net.layer(6).param(4).share_from());
-	EXPECT_EQ("2#w_c_hh", unrolled_net.layer(6).param(5).name());
-	EXPECT_EQ("w_c_hh", unrolled_net.layer(6).param(5).share_from());
-
-	EXPECT_EQ("out_0", unrolled_net.layer(7).name());
-	EXPECT_EQ(1, unrolled_net.layer(7).srclayers_size());
-	EXPECT_EQ("gru_0", unrolled_net.layer(7).srclayers(0));
-	EXPECT_EQ("w", unrolled_net.layer(7).param(0).name());
-	EXPECT_EQ("b", unrolled_net.layer(7).param(1).name());
-
-	EXPECT_EQ("out_1", unrolled_net.layer(8).name());
-	EXPECT_EQ(1, unrolled_net.layer(8).srclayers_size());
-	EXPECT_EQ("gru_1", unrolled_net.layer(8).srclayers(0));
-	EXPECT_EQ("w_1", unrolled_net.layer(8).param(0).name());
-	EXPECT_EQ("w", unrolled_net.layer(8).param(0).share_from());
-	EXPECT_EQ("b_1", unrolled_net.layer(8).param(1).name());
-	EXPECT_EQ("b", unrolled_net.layer(8).param(1).share_from());
-
-	EXPECT_EQ("out_2", unrolled_net.layer(9).name());
-	EXPECT_EQ(1, unrolled_net.layer(9).srclayers_size());
-	EXPECT_EQ("gru_2", unrolled_net.layer(9).srclayers(0));
-	EXPECT_EQ("w_2", unrolled_net.layer(9).param(0).name());
-	EXPECT_EQ("w", unrolled_net.layer(9).param(0).share_from());
-	EXPECT_EQ("b_2", unrolled_net.layer(9).param(1).name());
-	EXPECT_EQ("b", unrolled_net.layer(9).param(1).share_from());
-
-	EXPECT_EQ("softmax_0", unrolled_net.layer(10).name());
-	EXPECT_EQ(1, unrolled_net.layer(10).srclayers_size());
-	EXPECT_EQ("out_0", unrolled_net.layer(10).srclayers(0));
-
-	EXPECT_EQ("softmax_1", unrolled_net.layer(11).name());
-	EXPECT_EQ(1, unrolled_net.layer(11).srclayers_size());
-	EXPECT_EQ("out_1", unrolled_net.layer(11).srclayers(0));
-
-	EXPECT_EQ("softmax_2", unrolled_net.layer(12).name());
-	EXPECT_EQ(1, unrolled_net.layer(12).srclayers_size());
-	EXPECT_EQ("out_2", unrolled_net.layer(12).srclayers(0));
-
-	EXPECT_EQ("loss_0", unrolled_net.layer(13).name());
-	EXPECT_EQ(2, unrolled_net.layer(13).srclayers_size());
-	EXPECT_EQ("softmax_0", unrolled_net.layer(13).srclayers(0));
-	EXPECT_EQ("data", unrolled_net.layer(13).srclayers(1));
-
-	EXPECT_EQ("loss_1", unrolled_net.layer(14).name());
-	EXPECT_EQ(2, unrolled_net.layer(14).srclayers_size());
-	EXPECT_EQ("softmax_1", unrolled_net.layer(14).srclayers(0));
-	EXPECT_EQ("data", unrolled_net.layer(14).srclayers(1));
-
-	EXPECT_EQ("loss_2", unrolled_net.layer(15).name());
-	EXPECT_EQ(2, unrolled_net.layer(15).srclayers_size());
-	EXPECT_EQ("softmax_2", unrolled_net.layer(15).srclayers(0));
-	EXPECT_EQ("data", unrolled_net.layer(15).srclayers(1));
-}
-  */
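
The surviving train-side test spells out the unrolling contract: a layer with unroll_len 3 is cloned into "0#name", "1#name" and "2#name"; kUnrollOneToAll sources (here, data) feed every clone, while kUnrollOneToOne sources connect step i to step i; recurrent layers additionally receive the previous step as an extra source ("0#gru" feeds "1#gru"); and params of clones past step 0 are renamed "i#param" with share_from pointing at the step-0 copy, keeping the weights tied. A short sketch that dumps the unrolled topology, assuming job_conf1 is built as in the fixture above:

  // Fragment assumed to run inside the fixture's scope, as the tests do.
  singa::NetProto net;
  net.CopyFrom(job_conf1.neuralnet());
  singa::NetProto unrolled = singa::NeuralNet::Unrolling(net);
  for (int i = 0; i < unrolled.layer_size(); ++i) {
    const auto& layer = unrolled.layer(i);
    std::cout << layer.name() << " <-";   // e.g. "1#gru <- 1#embedding 0#gru"
    for (const auto& src : layer.srclayers())
      std::cout << " " << src;
    std::cout << "\n";
  }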

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd1e4afa/src/utils/blob.cc
----------------------------------------------------------------------
diff --git a/src/utils/blob.cc b/src/utils/blob.cc
deleted file mode 100644
index bfc36e6..0000000
--- a/src/utils/blob.cc
+++ /dev/null
@@ -1,259 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-/**
- * The code is adapted from Caffe under BSD 2 Clause license.
- *
- * COPYRIGHT
- * All contributions by the University of California:
- * Copyright (c) 2014, The Regents of the University of California (Regents)
- * All rights reserved.
- * All other contributions:
- * Copyright (c) 2014, the respective contributors
- * All rights reserved.
- */
-#include "singa/utils/blob.h"
-
-#include <cblas.h>
-#include <math.h>
-#include <utility>
-
-#define NOT_IMPLEMENTED LOG(FATAL) << "Not implemented function"
-#define NO_GPU LOG(FATAL) << "CPU-only Mode: cannot make GPU call."
-// Instantiate a class with float and double specifications.
-#define INSTANTIATE_CLASS(classname) \
-  template class classname<float>; \
-  template class classname<double>
-// Disable the copy and assignment operator for a class.
-#define DISABLE_COPY_AND_ASSIGN(classname) \
-private:\
-  classname(const classname&);\
-  classname& operator=(const classname&)
-
-#ifndef CPU_ONLY
-#include "singa/utils/cuda_utils.h"
-#endif  // CPU_ONLY
-
-namespace singa {
-
-SyncedMemory::~SyncedMemory() {
-  if (cpu_ptr_ && own_cpu_data_) {
-    FreeHost(cpu_ptr_);
-  }
-#ifndef CPU_ONLY
-  if (gpu_ptr_) {
-    CUDA_CHECK(cudaFree(gpu_ptr_));
-  }
-#endif  // CPU_ONLY
-}
-
-const void* SyncedMemory::cpu_data() {
-  to_cpu();
-  return cpu_ptr_;
-}
-
-const void* SyncedMemory::gpu_data() {
-#ifndef CPU_ONLY
-  to_gpu();
-  return gpu_ptr_;
-#else
-  NO_GPU;
-#endif
-  return nullptr;
-}
-
-void* SyncedMemory::mutable_cpu_data() {
-  to_cpu();
-  head_ = HEAD_AT_CPU;
-  return cpu_ptr_;
-}
-
-void* SyncedMemory::mutable_gpu_data() {
-#ifndef CPU_ONLY
-  to_gpu();
-  head_ = HEAD_AT_GPU;
-  return gpu_ptr_;
-#else
-  NO_GPU;
-#endif
-  return nullptr;
-}
-
-void SyncedMemory::set_cpu_data(void* data) {
-  CHECK(data);
-  if (own_cpu_data_) {
-    FreeHost(cpu_ptr_);
-  }
-  cpu_ptr_ = data;
-  head_ = HEAD_AT_CPU;
-  own_cpu_data_ = false;
-}
-
-void SyncedMemory::to_cpu() {
-  switch (head_) {
-  case UNINITIALIZED:
-    MallocHost(&cpu_ptr_, size_);
-    memset(cpu_ptr_, 0, size_);
-    head_ = HEAD_AT_CPU;
-    own_cpu_data_ = true;
-    break;
-  case HEAD_AT_GPU:
-#ifndef CPU_ONLY
-    if (cpu_ptr_ == NULL) {
-      MallocHost(&cpu_ptr_, size_);
-      own_cpu_data_ = true;
-    }
-    CUDA_CHECK(cudaMemcpy(cpu_ptr_, gpu_ptr_, size_, cudaMemcpyDefault));
-    head_ = SYNCED;
-#else
-    NO_GPU;
-#endif
-    break;
-  case HEAD_AT_CPU:
-  case SYNCED:
-    break;
-  }
-}
-
-void SyncedMemory::to_gpu() {
-#ifndef CPU_ONLY
-  switch (head_) {
-  case UNINITIALIZED:
-    CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
-    CUDA_CHECK(cudaMemset(gpu_ptr_, 0, size_));
-    head_ = HEAD_AT_GPU;
-    break;
-  case HEAD_AT_CPU:
-    if (gpu_ptr_ == NULL) {
-      CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
-    }
-    CUDA_CHECK(cudaMemcpy(gpu_ptr_, cpu_ptr_, size_, cudaMemcpyDefault));
-    head_ = SYNCED;
-    break;
-  case HEAD_AT_GPU:
-  case SYNCED:
-    break;
-  }
-#else
-  NO_GPU;
-#endif
-}
-
-template <typename Dtype>
-void Blob<Dtype>::Reshape(const std::vector<int>& shape) {
-  shape_ = shape;
-  count_ = shape.size() ? 1 : 0;
-  for (size_t i = 0; i < shape.size(); ++i) {
-    CHECK(shape[i]);
-    count_ *= shape[i];
-  }
-  if (count_ > capacity_) {
-    capacity_ = count_;
-    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
-  }
-}
-
-template <typename Dtype>
-void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
-  Reshape(other.shape());
-}
-
-template <typename Dtype>
-void Blob<Dtype>::CopyFrom(const Blob& source) {
-    CopyFrom(source, false);
-}
-
-template <typename Dtype>
-void Blob<Dtype>::CopyFrom(const Blob& source, bool shape_check) {
-  LOG(WARNING) << "Better use Copy(const Blob&, Blob*)";
-  CHECK_EQ(source.count(), count()) << " cp between blobs of diff size";
-
-  if (shape_check &&
-      !std::equal(shape_.begin(), shape_.end(), source.shape_.begin())) {
-      LOG(FATAL) << "Trying to copy blobs of different sizes.";
-  }
-#ifndef CPU_ONLY
-  CUDA_CHECK(cudaMemcpy(static_cast<Dtype*>(data_->mutable_gpu_data()),
-             source.gpu_data(), sizeof(Dtype) * count_, cudaMemcpyDefault));
-#endif
-  memcpy(static_cast<Dtype*>(data_->mutable_cpu_data()), source.cpu_data(),
-         sizeof(Dtype)*count_);
-}
-
-template <typename Dtype>
-void Blob<Dtype>::FromProto(const singa::BlobProto& proto) {
-  std::vector<int> shape;
-  for (int s : proto.shape()) {
-    shape.push_back(s);
-  }
-  int count = count_;
-  Reshape(shape);
-  if (count != count_)
-    LOG(WARNING) << "Blob is reshaped to diff size " << count << ":" << count_;
-  // copy data
-  Dtype* data_vec = mutable_cpu_data();
-  for (int i = 0; i < count_; ++i) {
-    data_vec[i] = proto.data(i);
-  }
-}
-
-template <typename Dtype>
-void Blob<Dtype>::ToProto(singa::BlobProto* proto) const {
-  for (int s : shape_) {
-    proto->add_shape(s);
-  }
-  proto->clear_data();
-  const Dtype* data_vec = cpu_data();
-  for (int i = 0; i < count_; ++i) {
-    proto->add_data(data_vec[i]);
-  }
-}
-
-template <typename Dtype>
-void Blob<Dtype>::SetValue(Dtype v) {
-  Dtype* ptr = mutable_cpu_data();
-  for (int i = 0; i < count(); i++)
-    ptr[i] = v;
-}
-template <typename Dtype>
-void Blob<Dtype>::ShareData(Blob* other, bool cpu_only) {
-  CHECK_EQ(count_, other->count());
-  if (cpu_only)
-    data_->set_cpu_data(other->mutable_cpu_data());
-  else
-    data_ = other->data_;
-}
-
-/*
-template <typename Dtype>
-void Blob<Dtype>::Swap(Blob& other) {
-  CHECK_EQ(other.count(), count());
-  CHECK(std::equal(shape_.begin(), shape_.end(), other.shape_.begin()));
-  std::swap(data_, other.data_);
-  std::swap(capacity_, other.capacity_);
-}
-*/
-
-INSTANTIATE_CLASS(Blob);
-template class Blob<int>;
-template class Blob<unsigned int>;
-
-}  // namespace singa
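
blob.cc carries the Caffe-style lazy-synchronization scheme: SyncedMemory tracks whether the freshest copy lives on the CPU, the GPU, both (SYNCED) or nowhere yet (UNINITIALIZED); cpu_data() and gpu_data() copy across devices only when the head sits on the other side, while the mutable_* accessors move the head and thereby mark the other copy stale. A small sketch of the access pattern through Blob, assuming a default-constructible Blob and the "singa/utils/blob.h" header from the deleted tree:

  #include <vector>
  #include "singa/utils/blob.h"

  void BlobRoundTrip() {
    singa::Blob<float> blob;                 // assumed default-constructible
    blob.Reshape(std::vector<int>{2, 3});    // 6 elements, allocated lazily
    blob.SetValue(0.5f);                     // writes via mutable_cpu_data()

    singa::BlobProto proto;                  // declared in the blob/proto headers
    blob.ToProto(&proto);                    // serialize shape + data
    singa::Blob<float> copy;
    copy.FromProto(proto);                   // reshape, then copy data back
  }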


