singa-commits mailing list archives

From wan...@apache.org
Subject [1/2] incubator-singa git commit: SINGA-37 Enable users to set parameter sharing in model configuration
Date Tue, 21 Jul 2015 14:06:23 GMT
Repository: incubator-singa
Updated Branches:
  refs/heads/master 189261f0e -> 5bf1c9280


SINGA-37 Enable users to set parameter sharing in model configuration

A share_from field is added to ParamProto.
It names the Param that owns the values of this param.
The NeuralNet class then shares the data_ Blob from the owner Param.
Specifically, it considers share_from first, and then param sharing
due to model partition (on dim 0).
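
For illustration, a minimal config sketch (with hypothetical param names)
of two params sharing values via share_from, in the model.conf format used below:

  param{
    name: "w1"
    init_method: kUniform
    low: -0.05
    high: 0.05
  }
  # shares w1's data_ Blob; the rest of the config (init_method, etc.)
  # is copied from w1, so only name and share_from need to be set
  param{
    name: "w1_share"
    share_from: "w1"
  }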

Added a header file, tinydir.h, for directory management, e.g., listing the files under a directory.
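
A minimal usage sketch built on the tinydir API declared in the header below
(ListDir is an assumed helper, not part of this commit):

  #include <cstdio>
  #include "utils/tinydir.h"

  // list the entries under `path`
  static void ListDir(const char* path) {
    tinydir_dir dir;
    tinydir_file file;
    if (tinydir_open(&dir, path) == -1) return;  // errno is set on failure
    while (dir.has_next) {
      tinydir_readfile(&dir, &file);  // fills file.name, file.path, file.is_dir
      std::printf("%s%s\n", file.name, file.is_dir ? "/" : "");
      tinydir_next(&dir);             // advance; clears has_next at the end
    }
    tinydir_close(&dir);
  }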

Copy the param config for share_from: users do not need to configure a param (e.g., its init_method)
if it has a share_from field; NeuralNet copies the config from the share_from proto.

Fixed a bug in copying the param proto for sharing params, which overwrote the param's name
because MergeFrom/CopyFrom overwrite singular fields.
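
Condensed from the neuralnet.cc hunk below, the fix saves and restores
those singular fields around CopyFrom:

  // `param` has share_from set; `name2param` maps param name -> ParamProto*
  const std::string name = param->name();
  const std::string from = param->share_from();
  param->CopyFrom(*name2param.at(from));  // copies init_method, low, high, ...
  param->set_name(name);                  // restore the overwritten name
  param->set_share_from(from);            // restore the overwritten share_from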


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/4502f41c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/4502f41c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/4502f41c

Branch: refs/heads/master
Commit: 4502f41cd0b4c84803d398972549f506e7ac6aa4
Parents: 729a5c4
Author: wang wei <wangwei@comp.nus.edu.sg>
Authored: Sun Jul 19 11:45:26 2015 +0800
Committer: Wei Wang <wangwei@comp.nus.edu.sg>
Committed: Tue Jul 21 21:08:12 2015 +0800

----------------------------------------------------------------------
 bin/singa-run.sh           |   2 +-
 examples/mnist/model.conf  |  24 +-
 include/utils/param.h      |   3 +
 include/utils/tinydir.h    | 562 ++++++++++++++++++++++++++++++++++++++++
 src/neuralnet/neuralnet.cc |  56 +++-
 src/proto/model.proto      |  11 +-
 src/test/test_msg.cc       |  81 ++++++
 src/test/test_neuralnet.cc |   9 +
 8 files changed, 727 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/bin/singa-run.sh
----------------------------------------------------------------------
diff --git a/bin/singa-run.sh b/bin/singa-run.sh
index 37e7c98..0a8c9f6 100755
--- a/bin/singa-run.sh
+++ b/bin/singa-run.sh
@@ -89,7 +89,7 @@ elif [ $# = 1 ] ; then
   -oUserKnownHostsFile=/dev/null \
   -oLogLevel=quiet"
   hosts=(`cat $host_path |cut -d ' ' -f 1`)
-  cmd="./singa -cluster=$conf_path/cluster.conf -model=$conf_path/model.conf -resume=true"
+  cmd="./singa -cluster=$conf_path/cluster.conf -model=$conf_path/model.conf"
   ssh_cmd="cd $BASE; "$cmd
   for i in ${hosts[@]} ; do
     if [ $i = localhost ] ; then

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/examples/mnist/model.conf
----------------------------------------------------------------------
diff --git a/examples/mnist/model.conf b/examples/mnist/model.conf
index cd113db..4b704bc 100644
--- a/examples/mnist/model.conf
+++ b/examples/mnist/model.conf
@@ -67,13 +67,13 @@ layer{
     num_output: 2500
   }
   param{
-    name: "weight"
+    name: "w1"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b1"
     init_method: kUniform
     low: -0.05
     high:0.05
@@ -93,13 +93,13 @@ layer{
     num_output: 2000
   }
   param{
-    name: "weight"
+    name: "w2"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b2"
     init_method: kUniform
     low: -0.05
     high:0.05
@@ -119,13 +119,13 @@ layer{
     num_output: 1500
   }
   param{
-    name: "weight"
+    name: "w3"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b3"
     init_method: kUniform
     low: -0.05
     high:0.05
@@ -146,13 +146,13 @@ layer{
     num_output: 1000
   }
   param{
-    name: "weight"
+    name: "w4"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b4"
     init_method: kUniform
     low: -0.05
     high:0.05
@@ -173,13 +173,13 @@ layer{
     num_output: 500
   }
   param{
-    name: "weight"
+    name: "w5"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b5"
     init_method: kUniform
     low: -0.05
     high:0.05
@@ -200,13 +200,13 @@ layer{
     num_output: 10
   }
   param{
-    name: "weight"
+    name: "w6"
     init_method: kUniform
     low:-0.05
     high:0.05
   }
   param{
-    name: "bias"
+    name: "b6"
     init_method: kUniform
     low: -0.05
     high:0.05

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/include/utils/param.h
----------------------------------------------------------------------
diff --git a/include/utils/param.h b/include/utils/param.h
index eaa7084..0273519 100644
--- a/include/utils/param.h
+++ b/include/utils/param.h
@@ -115,6 +115,9 @@ class Param {
   void set_local_version(int v) {
     local_version_=v;
   }
+  const std::string& share_from() const {
+    return proto_.share_from();
+  }
    /**
     * @return num of floats.
     */

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/include/utils/tinydir.h
----------------------------------------------------------------------
diff --git a/include/utils/tinydir.h b/include/utils/tinydir.h
new file mode 100644
index 0000000..abb7000
--- /dev/null
+++ b/include/utils/tinydir.h
@@ -0,0 +1,562 @@
+/*
+Copyright (c) 2013-2014, Cong Xu, Baudouin Feildel
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef TINYDIR_H
+#define TINYDIR_H
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#ifdef _MSC_VER
+#pragma warning (disable : 4996)
+#endif
+#else
+#include <dirent.h>
+#include <libgen.h>
+#include <sys/stat.h>
+#endif
+
+
+/* types */
+
+#define _TINYDIR_PATH_MAX 4096
+#ifdef _WIN32
+/* extra chars for the "\\*" mask */
+#define _TINYDIR_PATH_EXTRA 2
+#else
+#define _TINYDIR_PATH_EXTRA 0
+#endif
+#define _TINYDIR_FILENAME_MAX 256
+
+#ifdef _MSC_VER
+#define _TINYDIR_FUNC static __inline
+#else
+#define _TINYDIR_FUNC static __inline__
+#endif
+
+/* Allow user to use a custom allocator by defining _TINYDIR_MALLOC and _TINYDIR_FREE. */
+#if    defined(_TINYDIR_MALLOC) &&  defined(_TINYDIR_FREE)
+#elif !defined(_TINYDIR_MALLOC) && !defined(_TINYDIR_FREE)
+#else
+#error "Either define both alloc and free or none of them!"
+#endif
+
+#if !defined(_TINYDIR_MALLOC)
+	#define _TINYDIR_MALLOC(_size) malloc(_size)
+	#define _TINYDIR_FREE(_ptr)    free(_ptr)
+#endif //!defined(_TINYDIR_MALLOC)
+
+typedef struct
+{
+	char path[_TINYDIR_PATH_MAX];
+	char name[_TINYDIR_FILENAME_MAX];
+	char *extension;
+	int is_dir;
+	int is_reg;
+
+#ifdef _WIN32
+#else
+	struct stat _s;
+#endif
+} tinydir_file;
+
+typedef struct
+{
+	char path[_TINYDIR_PATH_MAX];
+	int has_next;
+	size_t n_files;
+
+	tinydir_file *_files;
+#ifdef _WIN32
+	HANDLE _h;
+	WIN32_FIND_DATAA _f;
+#else
+	DIR *_d;
+	struct dirent *_e;
+#endif
+} tinydir_dir;
+
+
+/* declarations */
+
+_TINYDIR_FUNC
+int tinydir_open(tinydir_dir *dir, const char *path);
+_TINYDIR_FUNC
+int tinydir_open_sorted(tinydir_dir *dir, const char *path);
+_TINYDIR_FUNC
+void tinydir_close(tinydir_dir *dir);
+
+_TINYDIR_FUNC
+int tinydir_next(tinydir_dir *dir);
+_TINYDIR_FUNC
+int tinydir_readfile(const tinydir_dir *dir, tinydir_file *file);
+_TINYDIR_FUNC
+int tinydir_readfile_n(const tinydir_dir *dir, tinydir_file *file, size_t i);
+_TINYDIR_FUNC
+int tinydir_open_subdir_n(tinydir_dir *dir, size_t i);
+
+_TINYDIR_FUNC
+void _tinydir_get_ext(tinydir_file *file);
+_TINYDIR_FUNC
+int _tinydir_file_cmp(const void *a, const void *b);
+
+
+/* definitions */
+
+_TINYDIR_FUNC
+int tinydir_open(tinydir_dir *dir, const char *path)
+{
+	if (dir == NULL || path == NULL || strlen(path) == 0)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	if (strlen(path) + _TINYDIR_PATH_EXTRA >= _TINYDIR_PATH_MAX)
+	{
+		errno = ENAMETOOLONG;
+		return -1;
+	}
+
+	/* initialise dir */
+	dir->_files = NULL;
+#ifdef _WIN32
+	dir->_h = INVALID_HANDLE_VALUE;
+#else
+	dir->_d = NULL;
+#endif
+	tinydir_close(dir);
+
+	strcpy(dir->path, path);
+#ifdef _WIN32
+	strcat(dir->path, "\\*");
+	dir->_h = FindFirstFileA(dir->path, &dir->_f);
+	dir->path[strlen(dir->path) - 2] = '\0';
+	if (dir->_h == INVALID_HANDLE_VALUE)
+#else
+	dir->_d = opendir(path);
+	if (dir->_d == NULL)
+#endif
+	{
+		errno = ENOENT;
+		goto bail;
+	}
+
+	/* read first file */
+	dir->has_next = 1;
+#ifndef _WIN32
+	dir->_e = readdir(dir->_d);
+	if (dir->_e == NULL)
+	{
+		dir->has_next = 0;
+	}
+#endif
+
+	return 0;
+
+bail:
+	tinydir_close(dir);
+	return -1;
+}
+
+_TINYDIR_FUNC
+int tinydir_open_sorted(tinydir_dir *dir, const char *path)
+{
+	/* Count the number of files first, to pre-allocate the files array */
+	size_t n_files = 0;
+	if (tinydir_open(dir, path) == -1)
+	{
+		return -1;
+	}
+	while (dir->has_next)
+	{
+		n_files++;
+		if (tinydir_next(dir) == -1)
+		{
+			goto bail;
+		}
+	}
+	tinydir_close(dir);
+
+	if (tinydir_open(dir, path) == -1)
+	{
+		return -1;
+	}
+
+	dir->n_files = 0;
+	dir->_files = (tinydir_file *)_TINYDIR_MALLOC(sizeof *dir->_files * n_files);
+	if (dir->_files == NULL)
+	{
+		errno = ENOMEM;
+		goto bail;
+	}
+	while (dir->has_next)
+	{
+		tinydir_file *p_file;
+		dir->n_files++;
+
+		p_file = &dir->_files[dir->n_files - 1];
+		if (tinydir_readfile(dir, p_file) == -1)
+		{
+			goto bail;
+		}
+
+		if (tinydir_next(dir) == -1)
+		{
+			goto bail;
+		}
+
+		/* Just in case the number of files has changed between the first and
+		second reads, terminate without writing into unallocated memory */
+		if (dir->n_files == n_files)
+		{
+			break;
+		}
+	}
+
+	qsort(dir->_files, dir->n_files, sizeof(tinydir_file), _tinydir_file_cmp);
+
+	return 0;
+
+bail:
+	tinydir_close(dir);
+	return -1;
+}
+
+_TINYDIR_FUNC
+void tinydir_close(tinydir_dir *dir)
+{
+	if (dir == NULL)
+	{
+		return;
+	}
+
+	memset(dir->path, 0, sizeof(dir->path));
+	dir->has_next = 0;
+	dir->n_files = 0;
+	if (dir->_files != NULL)
+	{
+		_TINYDIR_FREE(dir->_files);
+	}
+	dir->_files = NULL;
+#ifdef _WIN32
+	if (dir->_h != INVALID_HANDLE_VALUE)
+	{
+		FindClose(dir->_h);
+	}
+	dir->_h = INVALID_HANDLE_VALUE;
+#else
+	if (dir->_d)
+	{
+		closedir(dir->_d);
+	}
+	dir->_d = NULL;
+	dir->_e = NULL;
+#endif
+}
+
+_TINYDIR_FUNC
+int tinydir_next(tinydir_dir *dir)
+{
+	if (dir == NULL)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	if (!dir->has_next)
+	{
+		errno = ENOENT;
+		return -1;
+	}
+
+#ifdef _WIN32
+	if (FindNextFileA(dir->_h, &dir->_f) == 0)
+#else
+	dir->_e = readdir(dir->_d);
+	if (dir->_e == NULL)
+#endif
+	{
+		dir->has_next = 0;
+#ifdef _WIN32
+		if (GetLastError() != ERROR_SUCCESS &&
+			GetLastError() != ERROR_NO_MORE_FILES)
+		{
+			tinydir_close(dir);
+			errno = EIO;
+			return -1;
+		}
+#endif
+	}
+
+	return 0;
+}
+
+_TINYDIR_FUNC
+int tinydir_readfile(const tinydir_dir *dir, tinydir_file *file)
+{
+	if (dir == NULL || file == NULL)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+#ifdef _WIN32
+	if (dir->_h == INVALID_HANDLE_VALUE)
+#else
+	if (dir->_e == NULL)
+#endif
+	{
+		errno = ENOENT;
+		return -1;
+	}
+	if (strlen(dir->path) +
+		strlen(
+#ifdef _WIN32
+			dir->_f.cFileName
+#else
+			dir->_e->d_name
+#endif
+		) + 1 + _TINYDIR_PATH_EXTRA >=
+		_TINYDIR_PATH_MAX)
+	{
+		/* the path for the file will be too long */
+		errno = ENAMETOOLONG;
+		return -1;
+	}
+	if (strlen(
+#ifdef _WIN32
+			dir->_f.cFileName
+#else
+			dir->_e->d_name
+#endif
+		) >= _TINYDIR_FILENAME_MAX)
+	{
+		errno = ENAMETOOLONG;
+		return -1;
+	}
+
+	strcpy(file->path, dir->path);
+	strcat(file->path, "/");
+	strcpy(file->name,
+#ifdef _WIN32
+		dir->_f.cFileName
+#else
+		dir->_e->d_name
+#endif
+	);
+	strcat(file->path, file->name);
+#ifndef _WIN32
+	if (stat(file->path, &file->_s) == -1)
+	{
+		return -1;
+	}
+#endif
+	_tinydir_get_ext(file);
+
+	file->is_dir =
+#ifdef _WIN32
+		!!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
+#else
+		S_ISDIR(file->_s.st_mode);
+#endif
+	file->is_reg =
+#ifdef _WIN32
+		!!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) ||
+		(
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DEVICE) &&
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_ENCRYPTED) &&
+#ifdef FILE_ATTRIBUTE_INTEGRITY_STREAM
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_INTEGRITY_STREAM) &&
+#endif
+#ifdef FILE_ATTRIBUTE_NO_SCRUB_DATA
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_NO_SCRUB_DATA) &&
+#endif
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_OFFLINE) &&
+			!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_TEMPORARY));
+#else
+		S_ISREG(file->_s.st_mode);
+#endif
+
+	return 0;
+}
+
+_TINYDIR_FUNC
+int tinydir_readfile_n(const tinydir_dir *dir, tinydir_file *file, size_t i)
+{
+	if (dir == NULL || file == NULL)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	if (i >= dir->n_files)
+	{
+		errno = ENOENT;
+		return -1;
+	}
+
+	memcpy(file, &dir->_files[i], sizeof(tinydir_file));
+	_tinydir_get_ext(file);
+
+	return 0;
+}
+
+_TINYDIR_FUNC
+int tinydir_open_subdir_n(tinydir_dir *dir, size_t i)
+{
+	char path[_TINYDIR_PATH_MAX];
+	if (dir == NULL)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	if (i >= dir->n_files || !dir->_files[i].is_dir)
+	{
+		errno = ENOENT;
+		return -1;
+	}
+
+	strcpy(path, dir->_files[i].path);
+	tinydir_close(dir);
+	if (tinydir_open_sorted(dir, path) == -1)
+	{
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Open a single file given its path */
+_TINYDIR_FUNC
+int tinydir_file_open(tinydir_file *file, const char *path)
+{
+	tinydir_dir dir;
+	int result = 0;
+	int found = 0;
+	char dir_name_buf[_TINYDIR_PATH_MAX];
+	char file_name_buf[_TINYDIR_FILENAME_MAX];
+	char *dir_name;
+	char *base_name;
+#ifdef _WIN32
+	char drive_buf[_TINYDIR_PATH_MAX];
+	char ext_buf[_TINYDIR_FILENAME_MAX];
+#endif
+	
+	if (file == NULL || path == NULL || strlen(path) == 0)
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	if (strlen(path) + _TINYDIR_PATH_EXTRA >= _TINYDIR_PATH_MAX)
+	{
+		errno = ENAMETOOLONG;
+		return -1;
+	}
+
+	/* Get the parent path */
+#ifdef _WIN32
+	if (_splitpath_s(
+			path,
+			drive_buf, sizeof drive_buf,
+			dir_name_buf, sizeof dir_name_buf,
+			file_name_buf, sizeof file_name_buf,
+			ext_buf, sizeof ext_buf))
+	{
+		errno = EINVAL;
+		return -1;
+	}
+	/* Concatenate the drive letter and dir name to form full dir name */
+	strcat(drive_buf, dir_name_buf);
+	dir_name = drive_buf;
+	/* Concatenate the file name and extension to form base name */
+	strcat(file_name_buf, ext_buf);
+	base_name = file_name_buf;
+#else
+	strcpy(dir_name_buf, path);
+	dir_name = dirname(dir_name_buf);
+	strcpy(file_name_buf, path);
+	base_name = basename(file_name_buf);
+#endif
+	
+	/* Open the parent directory */
+	if (tinydir_open(&dir, dir_name) == -1)
+	{
+		return -1;
+	}
+
+	/* Read through the parent directory and look for the file */
+	while (dir.has_next)
+	{
+		if (tinydir_readfile(&dir, file) == -1)
+		{
+			result = -1;
+			goto bail;
+		}
+		if (strcmp(file->name, base_name) == 0)
+		{
+			/* File found */
+			found = 1;
+			goto bail;
+		}
+		tinydir_next(&dir);
+	}
+	if (!found)
+	{
+		result = -1;
+		errno = ENOENT;
+	}
+	
+bail:
+	tinydir_close(&dir);
+	return result;
+}
+
+_TINYDIR_FUNC
+void _tinydir_get_ext(tinydir_file *file)
+{
+	char *period = strrchr(file->name, '.');
+	if (period == NULL)
+	{
+		file->extension = &(file->name[strlen(file->name)]);
+	}
+	else
+	{
+		file->extension = period + 1;
+	}
+}
+
+_TINYDIR_FUNC
+int _tinydir_file_cmp(const void *a, const void *b)
+{
+	const tinydir_file *fa = (const tinydir_file *)a;
+	const tinydir_file *fb = (const tinydir_file *)b;
+	if (fa->is_dir != fb->is_dir)
+	{
+		return -(fa->is_dir - fb->is_dir);
+	}
+	return strncmp(fa->name, fb->name, _TINYDIR_FILENAME_MAX);
+}
+
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/src/neuralnet/neuralnet.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuralnet.cc b/src/neuralnet/neuralnet.cc
index 1097c0b..173f08f 100644
--- a/src/neuralnet/neuralnet.cc
+++ b/src/neuralnet/neuralnet.cc
@@ -43,6 +43,9 @@ shared_ptr<NeuralNet> NeuralNet::Create(
   NetProto conf;
   conf.CopyFrom(net_conf);
   conf.clear_layer();
+  // for sharing param conf
+  std::unordered_map<string, ParamProto*> name2param;
+  std::vector<ParamProto*> shares;
   // exclude layers according to phase
   for (const auto& layer : net_conf.layer()) {
     bool include = true;
@@ -56,8 +59,30 @@ shared_ptr<NeuralNet> NeuralNet::Create(
       // using net partition if layer partition is not set
       if (!layer_conf->has_partition_dim())
         layer_conf->set_partition_dim(net_conf.partition_dim());
+      for (int i = 0; i < layer_conf->param_size(); i++) {
+        ParamProto* param = layer_conf->mutable_param(i);
+        if (param->has_name() && param->name() != "") {
+          CHECK(name2param.find(param->name()) == name2param.end())
+            << "param name is repeated: " << param->name();
+          name2param[param->name()] = param;
+        }
+        if (param->has_share_from() && param->share_from() != "")
+          shares.push_back(param);
+      }
     }
   }
+  for (auto param : shares) {
+    const std::string from = param->share_from();
+    const std::string name = param->name();
+    CHECK(name2param.find(from) != name2param.end())
+      << "can't find param " << from;
+    // CopyFrom will overwrite the name and share_from fields
+    param->CopyFrom(*name2param.at(from));
+    param->set_name(name);
+    param->set_share_from(from);
+  }
+
+  for (auto layer : net_conf.layer())
   LOG(INFO) << "NeuralNet config is\n" << conf.DebugString();
 
   // TODO(wangwei) create net based on net type, e.g., directed, undirected, etc
@@ -120,10 +145,35 @@ void NeuralNet::CreateNetFromGraph(Graph* graph, int npartitions) {
       share_param_layers[node->origin].push_back(layer);
   }
   LOG(INFO) << "Neural net structure\n"  << graph->ToJson(layerinfo);
-  // share Params for layers generated from the same origin layer
+
+  // create map from param name to param ptr
+  std::unordered_map<string, Param*> name2param;
+  for (auto layer : layers_) {
+    for (auto param : layer->GetParams()) {
+      name2param[param->name()] = param;
+    }
+  }
+  for (auto & entry : share_param_layers) {
+    // overwrite entries for replicated params due to layer partition (dim 0).
+    for (auto *param : entry.second.front()->GetParams())
+      name2param.at(param->name()) = param;
+  }
+  // share params based on share_from field
+  for (auto & entry : name2param) {
+    Param* param = entry.second;
+    const string share_from = param->share_from();
+    if (param->share_from() != "") {
+      if(name2param.find(share_from) != name2param.end()) {
+        param->ShareFrom(*name2param.at(param->share_from()));
+      } else {
+        LOG(FATAL) << "No param with the name (share_from) " << share_from;
+      }
+    }
+  }
+  // share Params for layers generated (partitioned) from the same origin layer
   for (auto & entry : share_param_layers) {
-    auto owner = entry.second.begin();
-    auto owner_params = (*owner)->GetParams();
+    const auto& owner = entry.second.begin();
+    const auto& owner_params = (*owner)->GetParams();
     for (auto it = owner + 1; it != entry.second.end(); it++) {
       auto params = (*it)->GetParams();
       CHECK_EQ(params.size(), owner_params.size());

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/src/proto/model.proto
----------------------------------------------------------------------
diff --git a/src/proto/model.proto b/src/proto/model.proto
index e6bd834..f3b8dfe 100644
--- a/src/proto/model.proto
+++ b/src/proto/model.proto
@@ -93,7 +93,7 @@ message ParamProto {
     // <a href="http://deeplearning.net/tutorial/mlp.html"> Theano MLP</a>
     kUniformSqrtFanInOut = 6;
   }
-  required InitMethod init_method = 1 [default = kGaussian];
+  optional InitMethod init_method = 1 [default = kGaussian];
   // constant init
   optional float value = 5 [default = 1];
   // for uniform sampling
@@ -110,15 +110,16 @@ message ParamProto {
   optional int32 partition_dim = 30;
   // usually, the program will infer the param shape
   repeated int32 shape = 31;
-
   // used for identifying the same params from diff models and displaying debug info
   optional string name =  61 [default = ""];
+  // name of the owner param from which this param shares the values
+  optional string share_from = 62;
   // used internally
-  optional int32 id = 62;
+  optional int32 id = 63;
   // parameter slice limit (Google Protobuf also has size limit)
-  optional int32 split_threshold = 63 [default = 5000000];
+  optional int32 split_threshold = 64 [default = 5000000];
   // used internally
-  optional int32 owner = 64 [default = -1];
+  optional int32 owner = 65 [default = -1];
 }
 
 enum PartitionType{

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/src/test/test_msg.cc
----------------------------------------------------------------------
diff --git a/src/test/test_msg.cc b/src/test/test_msg.cc
new file mode 100644
index 0000000..61263d9
--- /dev/null
+++ b/src/test/test_msg.cc
@@ -0,0 +1,81 @@
+#include "gtest/gtest.h"
+#include "communication/msg.h"
+using namespace singa;
+TEST(MsgTest, AddrTest) {
+  int src_grp = 1, src_worker = 2;
+  int dst_grp = 0, dst_server = 1;
+  int src_addr = Addr(src_grp, src_worker, 0);
+  int dst_addr = Addr(dst_grp, dst_server, 1);
+  Msg msg(src_addr, dst_addr);
+  msg.set_trgt(123, -1);
+  ASSERT_EQ(AddrGrp(msg.src()), src_grp);
+  ASSERT_EQ(AddrID(msg.src()), src_worker);
+  ASSERT_EQ(AddrType(msg.src()), 0);
+
+  msg.SwapAddr();
+  ASSERT_EQ(AddrGrp(msg.src()), dst_grp);
+  ASSERT_EQ(AddrID(msg.src()), dst_server);
+  ASSERT_EQ(AddrType(msg.src()), 1);
+  ASSERT_EQ(msg.trgt_val(), 123);
+  ASSERT_EQ(msg.trgt_version(), -1);
+}
+
+TEST(MsgTest, AddFrameTest) {
+  int buf[5]={1,2,3,4,5};
+  Msg msg;
+  msg.AddFrame("abcdefg", 7);
+  msg.AddFrame(buf, sizeof(int) * 5);
+
+  msg.FirstFrame();
+  char* str = msg.FrameStr();
+  ASSERT_STREQ(str, "abcdefg");
+  delete str;
+  ASSERT_EQ(msg.NextFrame(), true);
+  int *val = static_cast<int*>(msg.FrameData());
+  ASSERT_EQ(val[3], 4);
+  ASSERT_EQ(msg.NextFrame(), false);
+
+  msg.FirstFrame();
+  str = msg.FrameStr();
+  ASSERT_STREQ(str, "abcdefg");
+  msg.LastFrame();
+  val = static_cast<int*>(msg.FrameData());
+  ASSERT_EQ(val[2], 3);
+}
+
+TEST(MsgTest, AddFormatFrame) {
+  int x = 5;
+  Msg msg;
+  msg.AddFormatFrame("i", 12);
+  msg.AddFormatFrame("f", 10.f);
+  msg.AddFormatFrame("s", "abc");
+  msg.AddFormatFrame("p", &x);
+  msg.AddFormatFrame("isfp", 12, "abc", 10.f, &x);
+
+  msg.FirstFrame();
+  int y;
+  msg.ParseFormatFrame("i", &y);
+  ASSERT_EQ(y, 12);
+  ASSERT_EQ(msg.NextFrame(), true);
+
+  float z;
+  msg.ParseFormatFrame("f", &z);
+  ASSERT_EQ(z, 10.f);
+  ASSERT_EQ(msg.NextFrame(), true);
+
+  char buf[10];
+  msg.ParseFormatFrame("s", buf);
+  ASSERT_STREQ(buf, "abc");
+  ASSERT_EQ(msg.NextFrame(), true);
+
+  int *p;
+  msg.ParseFormatFrame("p", &p);
+  ASSERT_EQ(p, &x);
+  ASSERT_EQ(msg.NextFrame(), true);
+
+  msg.ParseFormatFrame("isfp", &y, buf, &z, &p);
+  ASSERT_EQ(y, 12);
+  ASSERT_STREQ(buf, "abc");
+  ASSERT_EQ(z, 10.f);
+  ASSERT_EQ(p, &x);
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4502f41c/src/test/test_neuralnet.cc
----------------------------------------------------------------------
diff --git a/src/test/test_neuralnet.cc b/src/test/test_neuralnet.cc
new file mode 100644
index 0000000..9317ce0
--- /dev/null
+++ b/src/test/test_neuralnet.cc
@@ -0,0 +1,9 @@
+#include "gtest/gtest.h"
+#include "neuralnet/neuralnet.h"
+using namespace singa;
+
+TEST(NeuralNet, ParamShareFrom) {
+  NetProto conf;
+  // add net.conf file into test folder, e.g., conf/net.conf
+  // add data shard example in test folder, e.g., data/test.shard
+}

