singa-commits mailing list archives

From wang...@apache.org
Subject [18/21] incubator-singa git commit: SINGA-65 Add an example of implementing user's own layers
Date Wed, 16 Sep 2015 04:19:57 GMT
SINGA-65 Add an example of implementing user's own layers

Move the complete code into the examples/mlp/full folder.
Remove some code from the source files for users to fill in.
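
For orientation, the complete example under examples/mlp/full combines three pieces: a protobuf
extension holding the layer's configuration (myproto.proto), a NeuronLayer subclass that reads it
(hidden_layer.h/.cc), and a registration call in main.cc that binds the user_type string from
job.conf to the new class. A condensed sketch of that pattern follows; all names are taken from the
diffs below, and the method bodies are abbreviated (see the full files for the actual computation):

    #include "singa.h"
    #include "neuralnet/layer.h"
    #include "myproto.pb.h"  // generated from myproto.proto

    namespace singa {
    // Custom fully-connected layer; its configuration comes from the
    // HiddenProto extension declared in myproto.proto.
    class HiddenLayer : public NeuronLayer {
     public:
      void Setup(const LayerProto& proto, int npartitions) override {
        Layer::Setup(proto, npartitions);
        // read the custom field defined by the extension
        hdim_ = layer_proto_.GetExtension(hidden_conf).num_output();
        // ... reshape data_/grad_ and create the weight/bias Params ...
      }
      void ComputeFeature(int flag, Metric* perf) override { /* forward pass */ }
      void ComputeGradient(int flag, Metric* perf) override { /* backward pass */ }
     private:
      int hdim_;
    };
    }  // namespace singa

    int main(int argc, char** argv) {
      singa::Driver driver;
      driver.Init(argc, argv);
      // bind the user_type "kHidden" used in job.conf to the new class
      driver.RegisterLayer<singa::HiddenLayer, std::string>("kHidden");
      driver.Submit(false, driver.job_conf());
      return 0;
    }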


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/5a8a1a31
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/5a8a1a31
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/5a8a1a31

Branch: refs/heads/tutorial
Commit: 5a8a1a31648e7ab6d705bcca455adf3b584ea8d0
Parents: 4820d5e
Author: wangwei <wangwei@comp.nus.edu.sg>
Authored: Fri Sep 4 18:39:25 2015 +0800
Committer: Wei Wang <wangwei@comp.nus.edu.sg>
Committed: Wed Sep 16 11:39:16 2015 +0800

----------------------------------------------------------------------
 examples/mlp/full/hidden_layer.cc | 72 ++++++++++++++++++++++++++
 examples/mlp/full/hidden_layer.h  | 22 ++++++++
 examples/mlp/full/job.conf        | 95 ++++++++++++++++++++++++++++++++++
 examples/mlp/full/main.cc         | 25 +++++++++
 examples/mlp/full/myproto.proto   | 11 ++++
 examples/mlp/hidden_layer.cc      | 20 ++-----
 examples/mlp/hidden_layer.h       | 11 +---
 examples/mlp/job.conf             | 42 ++-------------
 examples/mlp/main.cc              |  3 +-
 examples/mlp/myproto.proto        |  9 +---
 10 files changed, 237 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/full/hidden_layer.cc
----------------------------------------------------------------------
diff --git a/examples/mlp/full/hidden_layer.cc b/examples/mlp/full/hidden_layer.cc
new file mode 100644
index 0000000..f4dacf2
--- /dev/null
+++ b/examples/mlp/full/hidden_layer.cc
@@ -0,0 +1,72 @@
+#include "hidden_layer.h"
+
+#include "mshadow/tensor.h"
+#include "mshadow/cxxnet_op.h"
+
+namespace singa {
+using namespace mshadow;
+using mshadow::cpu;
+using mshadow::Shape1;
+using mshadow::Shape2;
+
+inline Tensor<cpu, 2> NewTensor2(Blob<float>* blob) {
+  const vector<int>& shape = blob->shape();
+  Tensor<cpu, 2> tensor(blob->mutable_cpu_data(),
+      Shape2(shape[0], blob->count() / shape[0]));
+  return tensor;
+}
+
+inline Tensor<cpu, 1> NewTensor1(Blob<float>* blob) {
+  Tensor<cpu, 1> tensor(blob->mutable_cpu_data(), Shape1(blob->count()));
+  return tensor;
+}
+
+
+HiddenLayer::~HiddenLayer() {
+  delete weight_;
+  delete bias_;
+}
+
+void HiddenLayer::Setup(const LayerProto& proto, int npartitions) {
+  Layer::Setup(proto, npartitions);
+  CHECK_EQ(srclayers_.size(), 1);
+  const auto& src = srclayers_[0]->data(this);
+  batchsize_ = src.shape()[0];
+  vdim_ = src.count() / batchsize_;
+  hdim_ = layer_proto_.GetExtension(hidden_conf).num_output();
+  data_.Reshape(vector<int>{batchsize_, hdim_});
+  grad_.ReshapeLike(data_);
+  weight_ = Param::Create(proto.param(0));
+  bias_ = Param::Create(proto.param(1));
+  weight_->Setup(vector<int>{hdim_, vdim_});
+  bias_->Setup(vector<int>{hdim_});
+}
+
+void HiddenLayer::ComputeFeature(int flag, Metric* perf) {
+  auto data = NewTensor2(&data_);
+  auto src = NewTensor2(srclayers_[0]->mutable_data(this));
+  auto weight = NewTensor2(weight_->mutable_data());
+  auto bias = NewTensor1(bias_->mutable_data());
+  data = dot(src, weight.T());
+  // repmat: repeat bias vector into batchsize rows
+  data += expr::repmat(bias, batchsize_);
+  data = expr::F<op::stanh>(data);
+}
+
+void HiddenLayer::ComputeGradient(int flag, Metric* perf) {
+  auto data = NewTensor2(&data_);
+  auto src = NewTensor2(srclayers_[0]->mutable_data(this));
+  auto grad = NewTensor2(&grad_);
+  auto weight = NewTensor2(weight_->mutable_data());
+  auto gweight = NewTensor2(weight_->mutable_grad());
+  auto gbias = NewTensor1(bias_->mutable_grad());
+
+  grad = expr::F<op::stanh_grad>(data) * grad;
+  gbias = expr::sum_rows(grad);
+  gweight = dot(grad.T(), src);
+  if (srclayers_[0]->mutable_grad(this) != nullptr) {
+    auto gsrc = NewTensor2(srclayers_[0]->mutable_grad(this));
+    gsrc = dot(grad, weight);
+  }
+}
+}

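For reference, the mshadow expressions in the full hidden_layer.cc above implement a standard
fully-connected layer with the scaled-tanh (stanh) activation from cxxnet_op.h. With x the
batchsize x vdim input, W the hdim x vdim weight, b the bias, y the layer output (data_) and g the
gradient blob (grad_), the forward and backward passes compute:

\[
y = \mathrm{stanh}\!\left(x W^{\top} + \mathbf{1} b^{\top}\right), \qquad
g \leftarrow \mathrm{stanh\_grad}(y) \odot g,
\]
\[
\frac{\partial L}{\partial b} = \sum_{i} g_{i,:}, \qquad
\frac{\partial L}{\partial W} = g^{\top} x, \qquad
\frac{\partial L}{\partial x} = g\, W .
\]

Here repmat broadcasts the bias vector across the batchsize rows, and sum_rows sums the gradient
over the batch.
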
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/full/hidden_layer.h
----------------------------------------------------------------------
diff --git a/examples/mlp/full/hidden_layer.h b/examples/mlp/full/hidden_layer.h
new file mode 100644
index 0000000..c305261
--- /dev/null
+++ b/examples/mlp/full/hidden_layer.h
@@ -0,0 +1,22 @@
+#include "neuralnet/layer.h"
+#include "myproto.pb.h"
+
+namespace singa {
+class HiddenLayer : public NeuronLayer {
+ public:
+  ~HiddenLayer();
+  void Setup(const LayerProto& proto, int npartitions) override;
+  void ComputeFeature(int flag, Metric* perf) override;
+  void ComputeGradient(int flag, Metric* perf) override;
+  const std::vector<Param*> GetParams() const override {
+    std::vector<Param*> params{weight_, bias_};
+    return params;
+  }
+
+ private:
+  int batchsize_;
+  int vdim_, hdim_;
+  bool transpose_;
+  Param *weight_, *bias_;
+};
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/full/job.conf
----------------------------------------------------------------------
diff --git a/examples/mlp/full/job.conf b/examples/mlp/full/job.conf
new file mode 100644
index 0000000..163bc19
--- /dev/null
+++ b/examples/mlp/full/job.conf
@@ -0,0 +1,95 @@
+name: "mlp"
+train_steps: 1200
+test_steps:10
+test_freq:60
+disp_freq:10
+train_one_batch {
+  alg: kBP
+}
+updater{
+  type: kSGD
+  learning_rate{
+    type : kStep
+    base_lr: 0.001
+    step_conf{
+      change_freq: 60
+      gamma: 0.997
+    }
+  }
+}
+neuralnet {
+  layer {
+    name: "data"
+    type: kShardData
+    sharddata_conf {
+      path: "examples/mnist/mnist_train_shard"
+      batchsize: 1000
+    }
+    exclude: kTest
+  }
+
+  layer {
+    name: "data"
+    type: kShardData
+    sharddata_conf {
+      path: "examples/mnist/mnist_test_shard"
+      batchsize: 1000
+    }
+    exclude: kTrain
+  }
+
+  layer{
+    name:"mnist"
+    type: kMnist
+    srclayers: "data"
+    mnist_conf {
+      norm_a: 127.5
+      norm_b: 1
+    }
+  }
+  layer{
+    name: "label"
+    type: kLabel
+    srclayers: "data"
+  }
+
+
+  layer{
+    name: "hid1"
+    user_type: "kHidden"
+    srclayers:"mnist"
+    [singa.hidden_conf] {
+      num_output: 10
+    }
+    param{
+      name: "w1"
+      init {
+        type: kUniform
+        low:-0.05
+        high:0.05
+      }
+    }
+    param{
+      name: "b1"
+      init {
+        type : kUniform
+        low: -0.05
+        high:0.05
+      }
+    }
+  }
+ layer{
+    name: "loss"
+    type:kSoftmaxLoss
+    softmaxloss_conf{
+      topk:1
+    }
+    srclayers:"hid1"
+    srclayers:"label"
+  }
+}
+cluster {
+  nworker_groups: 1
+  nserver_groups: 1
+  workspace: "examples/mnist"
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/full/main.cc
----------------------------------------------------------------------
diff --git a/examples/mlp/full/main.cc b/examples/mlp/full/main.cc
new file mode 100644
index 0000000..c27f38d
--- /dev/null
+++ b/examples/mlp/full/main.cc
@@ -0,0 +1,25 @@
+#include <string>
+#include "singa.h"
+#include "hidden_layer.h"
+#include "myproto.pb.h"
+#include "utils/common.h"
+
+int main(int argc, char **argv) {
+  //  must create driver at the beginning and call its Init method.
+  singa::Driver driver;
+  driver.Init(argc, argv);
+
+  //  if -resume in argument list, set resume to true; otherwise false
+  int resume_pos = singa::ArgPos(argc, argv, "-resume");
+  bool resume = (resume_pos != -1);
+
+  //  users can register new subclasses of layer, updater, etc.
+  driver.RegisterLayer<singa::HiddenLayer, std::string>("kHidden");
+
+  //  get the job conf, and customize it if needed
+  singa::JobProto jobConf = driver.job_conf();
+
+  //  submit the job
+  driver.Submit(resume, jobConf);
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/full/myproto.proto
----------------------------------------------------------------------
diff --git a/examples/mlp/full/myproto.proto b/examples/mlp/full/myproto.proto
new file mode 100644
index 0000000..ba62ac2
--- /dev/null
+++ b/examples/mlp/full/myproto.proto
@@ -0,0 +1,11 @@
+package singa;
+import "job.proto";
+
+message HiddenProto {
+  required int32 num_output = 1;
+  optional bool use_stanh = 2 [default = true];
+}
+
+extend LayerProto {
+  optional HiddenProto hidden_conf = 102;
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/hidden_layer.cc
----------------------------------------------------------------------
diff --git a/examples/mlp/hidden_layer.cc b/examples/mlp/hidden_layer.cc
index f4dacf2..14eee23 100644
--- a/examples/mlp/hidden_layer.cc
+++ b/examples/mlp/hidden_layer.cc
@@ -32,14 +32,8 @@ void HiddenLayer::Setup(const LayerProto& proto, int npartitions) {
   CHECK_EQ(srclayers_.size(), 1);
   const auto& src = srclayers_[0]->data(this);
   batchsize_ = src.shape()[0];
-  vdim_ = src.count() / batchsize_;
-  hdim_ = layer_proto_.GetExtension(hidden_conf).num_output();
-  data_.Reshape(vector<int>{batchsize_, hdim_});
-  grad_.ReshapeLike(data_);
-  weight_ = Param::Create(proto.param(0));
-  bias_ = Param::Create(proto.param(1));
-  weight_->Setup(vector<int>{hdim_, vdim_});
-  bias_->Setup(vector<int>{hdim_});
+
+  // please add code to set up this layer
 }
 
 void HiddenLayer::ComputeFeature(int flag, Metric* perf) {
@@ -47,10 +41,7 @@ void HiddenLayer::ComputeFeature(int flag, Metric* perf) {
   auto src = NewTensor2(srclayers_[0]->mutable_data(this));
   auto weight = NewTensor2(weight_->mutable_data());
   auto bias = NewTensor1(bias_->mutable_data());
-  data = dot(src, weight.T());
-  // repmat: repeat bias vector into batchsize rows
-  data += expr::repmat(bias, batchsize_);
-  data = expr::F<op::stanh>(data);
+  // please add code for computing the hidden feature
 }
 
 void HiddenLayer::ComputeGradient(int flag, Metric* perf) {
@@ -61,9 +52,8 @@ void HiddenLayer::ComputeGradient(int flag, Metric* perf) {
   auto gweight = NewTensor2(weight_->mutable_grad());
   auto gbias = NewTensor1(bias_->mutable_grad());
 
-  grad = expr::F<op::stanh_grad>(data) * grad;
-  gbias = expr::sum_rows(grad);
-  gweight = dot(grad.T(), src);
+  // please add code for computing the gradients
+
   if (srclayers_[0]->mutable_grad(this) != nullptr) {
     auto gsrc = NewTensor2(srclayers_[0]->mutable_grad(this));
     gsrc = dot(grad, weight);

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/hidden_layer.h
----------------------------------------------------------------------
diff --git a/examples/mlp/hidden_layer.h b/examples/mlp/hidden_layer.h
index c305261..5dbae12 100644
--- a/examples/mlp/hidden_layer.h
+++ b/examples/mlp/hidden_layer.h
@@ -8,15 +8,6 @@ class HiddenLayer : public NeuronLayer {
   void Setup(const LayerProto& proto, int npartitions) override;
   void ComputeFeature(int flag, Metric* perf) override;
   void ComputeGradient(int flag, Metric* perf) override;
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
 
- private:
-  int batchsize_;
-  int vdim_, hdim_;
-  bool transpose_;
-  Param *weight_, *bias_;
-};
+// please fill in the HiddenLayer class declaration
 }

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/job.conf
----------------------------------------------------------------------
diff --git a/examples/mlp/job.conf b/examples/mlp/job.conf
index 163bc19..6f57677 100644
--- a/examples/mlp/job.conf
+++ b/examples/mlp/job.conf
@@ -3,20 +3,9 @@ train_steps: 1200
 test_steps:10
 test_freq:60
 disp_freq:10
-train_one_batch {
-  alg: kBP
-}
-updater{
-  type: kSGD
-  learning_rate{
-    type : kStep
-    base_lr: 0.001
-    step_conf{
-      change_freq: 60
-      gamma: 0.997
-    }
-  }
-}
+
+# please add the config for updater and train_one_batch
+
 neuralnet {
   layer {
     name: "data"
@@ -53,31 +42,8 @@ neuralnet {
     srclayers: "data"
   }
 
+# please add the config for the hidden layer
 
-  layer{
-    name: "hid1"
-    user_type: "kHidden"
-    srclayers:"mnist"
-    [singa.hidden_conf] {
-      num_output: 10
-    }
-    param{
-      name: "w1"
-      init {
-        type: kUniform
-        low:-0.05
-        high:0.05
-      }
-    }
-    param{
-      name: "b1"
-      init {
-        type : kUniform
-        low: -0.05
-        high:0.05
-      }
-    }
-  }
  layer{
     name: "loss"
     type:kSoftmaxLoss

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/main.cc
----------------------------------------------------------------------
diff --git a/examples/mlp/main.cc b/examples/mlp/main.cc
index c27f38d..cf104e6 100644
--- a/examples/mlp/main.cc
+++ b/examples/mlp/main.cc
@@ -13,8 +13,7 @@ int main(int argc, char **argv) {
   int resume_pos = singa::ArgPos(argc, argv, "-resume");
   bool resume = (resume_pos != -1);
 
-  //  users can register new subclasses of layer, updater, etc.
-  driver.RegisterLayer<singa::HiddenLayer, std::string>("kHidden");
+  //  please register the HiddenLayer here
 
   //  get the job conf, and customize it if needed
   singa::JobProto jobConf = driver.job_conf();

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5a8a1a31/examples/mlp/myproto.proto
----------------------------------------------------------------------
diff --git a/examples/mlp/myproto.proto b/examples/mlp/myproto.proto
index ba62ac2..deea1d4 100644
--- a/examples/mlp/myproto.proto
+++ b/examples/mlp/myproto.proto
@@ -1,11 +1,4 @@
 package singa;
 import "job.proto";
 
-message HiddenProto {
-  required int32 num_output = 1;
-  optional bool use_stanh = 2 [default = true];
-}
-
-extend LayerProto {
-  optional HiddenProto hidden_conf = 102;
-}
+// please add the definition of HiddenProto

