singa-commits mailing list archives

From wang...@apache.org
Subject incubator-singa git commit: SINGA-267 Add spatial mode in batch normalization layer
Date Sun, 23 Oct 2016 14:15:42 GMT
Repository: incubator-singa
Updated Branches:
  refs/heads/master 61faa840e -> fac3af949


SINGA-267 Add spatial mode in batch normalization layer

Revise the batchnorm test cases for spatial mode: the CPU Forward/Backward tests now feed 2D (batch x channel) inputs instead of 4D ones, and the loop index in batchnorm.cc becomes unsigned to match the unsigned spatial extent.
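
For context, spatial mode computes one mean/variance pair per channel, estimated over all batch and H x W positions, rather than one pair per individual feature. A minimal plain-C++ sketch of the training-mode forward pass (illustrative only; the function name and NCHW buffer layout below are assumptions, not SINGA's Tensor API):

#include <cmath>
#include <cstddef>
#include <vector>

// Training-mode spatial batch norm over an NCHW buffer: one mean and
// one (biased) variance per channel, estimated over all N*H*W values
// of that channel and broadcast back to every spatial position.
void SpatialBatchNormForward(const std::vector<float>& x,
                             std::size_t n, std::size_t c,
                             std::size_t h, std::size_t w,
                             const std::vector<float>& gamma,  // per channel
                             const std::vector<float>& beta,   // per channel
                             std::vector<float>& y,
                             float eps = 1e-5f) {
  const std::size_t hw = h * w, chw = c * hw, m = n * hw;
  y.resize(x.size());
  for (std::size_t ch = 0; ch < c; ++ch) {
    float mean = 0.f, var = 0.f;
    for (std::size_t i = 0; i < n; ++i)
      for (std::size_t s = 0; s < hw; ++s)
        mean += x[i * chw + ch * hw + s];
    mean /= m;
    for (std::size_t i = 0; i < n; ++i)
      for (std::size_t s = 0; s < hw; ++s) {
        const float d = x[i * chw + ch * hw + s] - mean;
        var += d * d;
      }
    var /= m;
    const float inv_std = 1.f / std::sqrt(var + eps);
    for (std::size_t i = 0; i < n; ++i)
      for (std::size_t s = 0; s < hw; ++s) {
        const std::size_t idx = i * chw + ch * hw + s;
        y[idx] = gamma[ch] * (x[idx] - mean) * inv_std + beta[ch];
      }
  }
}

The stacking of runningMean_/runningVariance_/bnScale_/bnBias_ in the batchnorm.cc hunk below serves the same broadcast: each per-channel vector is repeated height_ * width_ times so it lines up with every spatial location.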


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/fac3af94
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/fac3af94
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/fac3af94

Branch: refs/heads/master
Commit: fac3af94990e4c9977f82a11ae85f89de9dbb463
Parents: 61faa84
Author: WANG Ji <ijingobravo@gmail.com>
Authored: Sun Oct 23 20:54:38 2016 +0800
Committer: Wang Ji <ijingobravo@gmail.com>
Committed: Sun Oct 23 21:07:01 2016 +0800

----------------------------------------------------------------------
 src/model/layer/batchnorm.cc |  2 +-
 test/singa/test_batchnorm.cc | 44 ++++++++++++++++++---------------------
 2 files changed, 21 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fac3af94/src/model/layer/batchnorm.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/batchnorm.cc b/src/model/layer/batchnorm.cc
index afe9a36..e07dfd9 100644
--- a/src/model/layer/batchnorm.cc
+++ b/src/model/layer/batchnorm.cc
@@ -117,7 +117,7 @@ const Tensor BatchNorm::Forward(int flag, const Tensor& input) {
       bnBias_.Reshape(Shape{channels_, 1});
 
       std::vector<Tensor> mean_stack, var_stack, scale_stack, bias_stack;
-      for (int i = 0; i < height_ * width_; ++i) {
+      for (unsigned i = 0; i < height_ * width_; ++i) {
         mean_stack.push_back(runningMean_);
         var_stack.push_back(runningVariance_);
         scale_stack.push_back(bnScale_);
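
The int -> unsigned change above avoids a signed/unsigned comparison: height_ and width_ are presumably unsigned members (e.g. size_t), so height_ * width_ is unsigned, and a signed int loop index would draw a -Wsign-compare warning. A minimal illustration (assumed types, not SINGA's code):

#include <cstddef>

int main() {
  std::size_t hw = 4;  // like height_ * width_ when both members are size_t
  // for (int i = 0; i < hw; ++i) {}  // warns under -Wsign-compare
  for (unsigned i = 0; i < hw; ++i) {
    // push one per-channel copy per spatial location, as in the hunk above
  }
  return 0;
}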

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fac3af94/test/singa/test_batchnorm.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_batchnorm.cc b/test/singa/test_batchnorm.cc
index c8efbf9..fadba42 100644
--- a/test/singa/test_batchnorm.cc
+++ b/test/singa/test_batchnorm.cc
@@ -43,19 +43,19 @@ TEST(BatchNorm, Setup) {
 TEST(BatchNorm, Forward) {
   BatchNorm batchnorm;
   const float x[] = {1, 2, 3, 4};
-  Tensor in(Shape{2, 1, 2, 1});
-  in.CopyDataFromHostPtr(x, 2 * 1 * 2 * 1);
+  Tensor in(Shape{2, 2});
+  in.CopyDataFromHostPtr(x, 2 * 2);
   const float alpha_[] = {1, 1};
-  Tensor alpha(Shape{1, 2});
-  alpha.CopyDataFromHostPtr(alpha_, 1 * 2);
+  Tensor alpha(Shape{2});
+  alpha.CopyDataFromHostPtr(alpha_, 2);
 
   const float beta_[] = {2, 2};
-  Tensor beta(Shape{1, 2});
-  beta.CopyDataFromHostPtr(beta_, 1 * 2);
+  Tensor beta(Shape{2});
+  beta.CopyDataFromHostPtr(beta_, 2);
   singa::LayerConf conf;
   singa::BatchNormConf *batchnorm_conf = conf.mutable_batchnorm_conf();
   batchnorm_conf->set_factor(1);
-  batchnorm.Setup(Shape{1, 2, 1}, conf);
+  batchnorm.Setup(Shape{2}, conf);
   batchnorm.set_bnScale(alpha);
   batchnorm.set_bnBias(beta);
   batchnorm.set_runningMean(beta);
@@ -63,11 +63,9 @@ TEST(BatchNorm, Forward) {
   Tensor out = batchnorm.Forward(kTrain, in);
   const float *outptr = out.data<float>();
   const auto &shape = out.shape();
-  EXPECT_EQ(4u, shape.size());
+  EXPECT_EQ(2u, shape.size());
   EXPECT_EQ(2u, shape[0]);
-  EXPECT_EQ(1u, shape[1]);
-  EXPECT_EQ(2u, shape[2]);
-  EXPECT_EQ(1u, shape[3]);
+  EXPECT_EQ(2u, shape[1]);
   EXPECT_NEAR(1.0f, outptr[0], 1e-4f);
   EXPECT_NEAR(1.0f, outptr[1], 1e-4f);
   EXPECT_NEAR(3.0f, outptr[2], 1e-4f);
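
A hand-check of the Forward expectations above (a sketch assuming biased variance and negligible epsilon; under kTrain the layer uses batch statistics, so the running mean/variance set above do not enter the result). With in = {{1, 2}, {3, 4}}, channel 0 holds {1, 3} and channel 1 holds {2, 4}, giving per-channel means (2, 3) and biased variances (1, 1); with bnScale = 1 and bnBias = 2 the output is {1, 1, 3, 3} in row-major order:

#include <cassert>
#include <cmath>

int main() {
  const float x[2][2] = {{1, 3}, {2, 4}};   // x[channel][sample]
  const float mean[2] = {2, 3}, var[2] = {1, 1}, scale = 1, bias = 2;
  const float expected[2][2] = {{1, 3}, {1, 3}};
  for (int c = 0; c < 2; ++c)
    for (int i = 0; i < 2; ++i) {
      const float y = scale * (x[c][i] - mean[c]) / std::sqrt(var[c]) + bias;
      assert(std::fabs(y - expected[c][i]) < 1e-4f);  // matches EXPECT_NEAR
    }
  return 0;
}
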
@@ -77,22 +75,22 @@ TEST(BatchNorm, Forward) {
 TEST(BatchNorm, Backward) {
   BatchNorm batchnorm;
   const float x[] = {1, 2, 3, 4};
-  Tensor in(Shape{2, 1, 2, 1});
-  in.CopyDataFromHostPtr(x, 2 * 1 * 2 * 1);
+  Tensor in(Shape{2, 2});
+  in.CopyDataFromHostPtr(x, 2 * 2);
   const float dy[] = {4, 3, 2, 1};
-  Tensor dy_in(Shape{2, 1, 2, 1});
-  dy_in.CopyDataFromHostPtr(dy, 2 * 1 * 2 * 1);
+  Tensor dy_in(Shape{2, 2});
+  dy_in.CopyDataFromHostPtr(dy, 2 * 2);
   const float alpha_[] = {1, 1};
-  Tensor alpha(Shape{1, 2});
-  alpha.CopyDataFromHostPtr(alpha_, 1 * 2);
+  Tensor alpha(Shape{2});
+  alpha.CopyDataFromHostPtr(alpha_, 2);
 
   const float beta_[] = {0, 0};
-  Tensor beta(Shape{1, 2});
-  beta.CopyDataFromHostPtr(beta_, 1 * 2);
+  Tensor beta(Shape{2});
+  beta.CopyDataFromHostPtr(beta_, 2);
   singa::LayerConf conf;
   singa::BatchNormConf *batchnorm_conf = conf.mutable_batchnorm_conf();
   batchnorm_conf->set_factor(1);
-  batchnorm.Setup(Shape{1, 2, 1}, conf);
+  batchnorm.Setup(Shape{2}, conf);
   batchnorm.set_bnScale(alpha);
   batchnorm.set_bnBias(beta);
   batchnorm.set_runningMean(beta);
@@ -101,11 +99,9 @@ TEST(BatchNorm, Backward) {
   auto ret = batchnorm.Backward(kTrain, dy_in);
   Tensor dx = ret.first;
   const auto & shape = dx.shape();
-  EXPECT_EQ(4u, shape.size());
+  EXPECT_EQ(2u, shape.size());
   EXPECT_EQ(2u, shape[0]);
-  EXPECT_EQ(1u, shape[1]);
-  EXPECT_EQ(2u, shape[2]);
-  EXPECT_EQ(1u, shape[3]);
+  EXPECT_EQ(2u, shape[1]);
   const float *dxptr = ret.first.data<float>();
   EXPECT_NEAR(.0f, dxptr[0], 1e-4f);
   EXPECT_NEAR(.0f, dxptr[1], 1e-4f);
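
Why the gradients come out as zero: assuming the CPU path implements the standard batch norm backward rule dx = (gamma / sqrt(var + eps)) * (dy - mean(dy) - xhat * mean(dy * xhat)) per channel (an assumption; the diff only shows the expectations), a quick hand-check gives zero for every entry:

#include <cstdio>

int main() {
  // Channel 0: x = {1, 3}, dy = {4, 2}; channel 1: x = {2, 4}, dy = {3, 1}.
  // Normalized input xhat = {-1, +1} for both channels; gamma = 1 and
  // sqrt(var + eps) ~= 1, so the leading factor drops out.
  const float xhat[2] = {-1, 1};
  const float dy[2][2] = {{4, 2}, {3, 1}};   // dy[channel][sample]
  const float mean_dy[2] = {3, 2};           // mean(dy) per channel
  const float mean_dy_xhat[2] = {-1, -1};    // mean(dy * xhat) per channel
  for (int c = 0; c < 2; ++c)
    for (int i = 0; i < 2; ++i)
      std::printf("dx[%d][%d] = %f\n", c, i, // every entry comes out 0
                  dy[c][i] - mean_dy[c] - xhat[i] * mean_dy_xhat[c]);
  return 0;
}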

