singa-commits mailing list archives

From wang...@apache.org
Subject [4/8] incubator-singa git commit: SINGA-247 Add windows support for singa
Date Mon, 10 Oct 2016 15:46:10 GMT
SINGA-247 Add windows support for singa

Fix warnings.
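
Most of the hunks below silence MSVC's 64-bit-to-32-bit conversion warnings
(C4244/C4267) by casting size_t expressions to int at call sites that expect
int. A minimal sketch of the pattern, in which NewBlock is a stand-in for an
int-taking callee such as Device::NewBlock, not the actual SINGA API:

    // Illustration only: size_t is 64 bits on x64 Windows while int is 32
    // bits, so the implicit conversion raises MSVC warning C4267.
    #include <cstddef>

    void NewBlock(int /*size*/) {}   // hypothetical int-taking callee

    int main() {
      std::size_t size = 1024 * sizeof(float);
      // NewBlock(size);             // warns under MSVC (C4267)
      NewBlock((int)size);           // explicit cast, as used throughout this commit
      return 0;
    }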


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/37d3b39e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/37d3b39e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/37d3b39e

Branch: refs/heads/master
Commit: 37d3b39e942d9e1d03344524a2e958a9d7aa83c3
Parents: d0317a7
Author: xiezl <xiezhongle@comp.nus.edu.sg>
Authored: Tue Sep 20 14:02:47 2016 +0800
Committer: Wei Wang <wangwei@comp.nus.edu.sg>
Committed: Mon Oct 10 17:44:53 2016 +0800

----------------------------------------------------------------------
 include/singa/core/tensor.h       |  2 +-
 include/singa/utils/string.h      |  2 +-
 src/CMakeLists.txt                |  1 +
 src/core/tensor/tensor.cc         | 32 ++++++++++++++++----------------
 src/core/tensor/tensor_math_cpp.h |  4 ++--
 src/io/binfile_reader.cc          |  2 +-
 src/io/binfile_writer.cc          |  8 ++++----
 src/model/feed_forward_net.cc     | 22 +++++++++++-----------
 src/model/layer/convolution.cc    |  8 +++-----
 src/model/layer/pooling.cc        | 12 ++++++------
 src/utils/logging.cc              |  5 ++++-
 11 files changed, 50 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index 00df60b..a41afbc 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -211,7 +211,7 @@ class Tensor {
   /// Note: block_ is allocated in lazy manner to avoid frequent malloc/free.
   /// If you want to get an allocated Block, use block() instead of block_.
   Block *block_ = nullptr;
-  Shape shape_;// = {};
+  Shape shape_ = {};
 };
 
 typedef Shape::iterator ShapeIter;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/include/singa/utils/string.h
----------------------------------------------------------------------
diff --git a/include/singa/utils/string.h b/include/singa/utils/string.h
index b4c7c24..35177e2 100644
--- a/include/singa/utils/string.h
+++ b/include/singa/utils/string.h
@@ -84,7 +84,7 @@ class Tokenizer {
     auto pos = buf_.find_first_of(sep_, start);
     if (pos == std::string::npos)
       pos = buf_.length();
-    start_ = pos + 1;
+    start_ = (unsigned int)(pos + 1);
     out = buf_.substr(start, pos);
     return *this;
   }

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index adafdf2..6704960 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -81,6 +81,7 @@ ENDFOREACH()
 ADD_LIBRARY(singa_objects OBJECT ${singa_sources})
 IF(WIN32) 
   IF (MSVC)
+	ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE)
 	#SET_TARGET_PROPERTIES(singa_objects 
 	#  PROPERTIES COMPILE_FLAGS "/wd4244 /wd4267 /wd4018 /wd4005 /wd4804 /wd4800")
   ENDIF()
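
The added _CRT_SECURE_NO_DEPRECATE definition targets a different warning
family: MSVC flags standard CRT calls such as fopen() as deprecated (C4996)
unless the macro is defined before the CRT headers are included. A minimal
sketch, assuming an MSVC build (the file name is illustrative):

    #define _CRT_SECURE_NO_DEPRECATE  // same effect as the -D flag added above
    #include <cstdio>

    int main() {
      FILE* fp = std::fopen("data.bin", "rb");  // no C4996 with the macro defined
      if (fp) std::fclose(fp);
      return 0;
    }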

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index 40d85a2..d96b2ec 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -36,26 +36,26 @@ Tensor::Tensor(const Shape &shape, DataType dtype)
     : data_type_(dtype), device_(defaultDevice), shape_(shape) {
   size_t size = Product(shape_) * SizeOf(data_type_);
   if (size)
-    block_ = device_->NewBlock(size);
+    block_ = device_->NewBlock((int)size);
 }
 Tensor::Tensor(Shape &&shape, DataType dtype)
     : data_type_(dtype), device_(defaultDevice), shape_(shape) {
   size_t size = Product(shape_) * SizeOf(data_type_);
   if (size)
-    block_ = device_->NewBlock(size);
+    block_ = device_->NewBlock((int)size);
 }
 Tensor::Tensor(const Shape &shape, std::shared_ptr<Device> device,
                DataType dtype)
     : data_type_(dtype), device_(device), shape_(shape) {
   size_t size = Product(shape_) * SizeOf(data_type_);
   if (size)
-    block_ = device_->NewBlock(size);
+    block_ = device_->NewBlock((int)size);
 }
 Tensor::Tensor(Shape &&shape, std::shared_ptr<Device> device, DataType dtype)
     : data_type_(dtype), device_(device), shape_(shape) {
   size_t size = Product(shape_) * SizeOf(data_type_);
   if (size)
-    block_ = device_->NewBlock(size);
+    block_ = device_->NewBlock((int)size);
 }
 Tensor::Tensor(const Tensor &in)
     : transpose_(in.transpose_),
@@ -89,7 +89,7 @@ void Tensor::ResetLike(const Tensor &in) {
       device_->FreeBlock(block_);
     device_ = in.device_;
     data_type_ = in.data_type_;
-    block_ = device_->NewBlock(in.MemSize());
+    block_ = device_->NewBlock((int)in.MemSize());
   }
   shape_ = in.shape_;
 }
@@ -98,7 +98,7 @@ void Tensor::Reshape(const Shape &shape) {
   if (Product(shape_) != Product(shape)) {
     if (block_ != nullptr && block_->DecRefCount() == 0)
       device_->FreeBlock(block_);
-    block_ = device_->NewBlock(Product(shape) * SizeOf(data_type_));
+    block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
   }
   shape_ = shape;
 }
@@ -107,7 +107,7 @@ void Tensor::Reshape(Shape &&shape) {
   if (Product(shape_) != Product(shape)) {
     if (block_ != nullptr && block_->DecRefCount() == 0)
       device_->FreeBlock(block_);
-    block_ = device_->NewBlock(Product(shape) * SizeOf(data_type_));
+    block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
   }
   shape_ = std::move(shape);
 }
@@ -116,7 +116,7 @@ void Tensor::AsType(const DataType type) {
   if (data_type_ != type) {
     if (block_ != nullptr && block_->DecRefCount() == 0)
       device_->FreeBlock(block_);
-    block_ = device_->NewBlock(Product(shape_) * SizeOf(type));
+    block_ = device_->NewBlock((int)(Product(shape_) * SizeOf(type)));
     data_type_ = type;
   }
 }
@@ -182,20 +182,20 @@ void Tensor::FromProto(const singa::TensorProto &proto) {
     case kFloat32: {
       std::unique_ptr<float[]> data_ptr(new float[Product(shape_)]);
       for (size_t i = 0; i < Product(shape_); ++i)
-        data_ptr[i] = static_cast<float>(proto.float_data(i));
+        data_ptr[i] = static_cast<float>(proto.float_data((int)i));
       CopyDataFromHostPtr<float>(data_ptr.get(), Product(shape_));
       break;
     }
     case kDouble: {
       std::unique_ptr<double[]> data(new double[Product(shape_)]);
       for (size_t i = 0; i < Product(shape_); ++i)
-        data[i] = proto.double_data(i);
+        data[i] = proto.double_data((int)i);
       CopyDataFromHostPtr<double>(data.get(), Product(shape_));
       break;
     }
     case kInt: {
       std::unique_ptr<int[]> data(new int[Product(shape_)]);
-      for (size_t i = 0; i < Product(shape_); ++i) data[i] = proto.int_data(i);
+      for (size_t i = 0; i < Product(shape_); ++i) data[i] = proto.int_data((int)i);
       CopyDataFromHostPtr<int>(data.get(), Product(shape_));
       break;
     }
@@ -369,17 +369,17 @@ void CopyDataToFrom(Tensor *dst, const Tensor &src, const size_t num,
   if (dst_dev->lang() != src_dev->lang()) {
     // let the none cpp device conduct copy op
     if (dst_dev->lang() == kCpp) {
-      src_dev->CopyDataToFrom(to, from, nBytes, kDeviceToHost, d_offset,
-                              s_offset);
+      src_dev->CopyDataToFrom(to, from, nBytes, kDeviceToHost, (int)d_offset,
+                              (int)s_offset);
     } else if (src_dev->lang() == kCpp) {
-      dst_dev->CopyDataToFrom(to, from, nBytes, kHostToDevice, d_offset,
-                              s_offset);
+      dst_dev->CopyDataToFrom(to, from, nBytes, kHostToDevice, (int)d_offset,
+							  (int)s_offset);
     } else {
       LOG(FATAL) << "Not support mem copy betwee Cuda and OpenCL device";
     }
   } else {
     auto direct = src_dev->lang() == kCpp ? kHostToHost : kDeviceToDevice;
-    src_dev->CopyDataToFrom(to, from, nBytes, direct, d_offset, s_offset);
+    src_dev->CopyDataToFrom(to, from, nBytes, direct, (int)d_offset, (int)s_offset);
   }
 }
 //============================================================================

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/core/tensor/tensor_math_cpp.h
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor_math_cpp.h b/src/core/tensor/tensor_math_cpp.h
index 8c8a40a..e978e8e 100644
--- a/src/core/tensor/tensor_math_cpp.h
+++ b/src/core/tensor/tensor_math_cpp.h
@@ -462,7 +462,7 @@ void GEMM<float, lang::Cpp>(const bool transA, const bool transB,
   const float *BPtr = static_cast<const float *>(B->data());
   float *CPtr = static_cast<float *>(C->mutable_data());
   cblas_sgemm(CblasRowMajor, transa, transb, nrowA, ncolB, ncolA, alpha, APtr,
-              lda, BPtr, ldb, beta, CPtr, ldc);
+	  lda, BPtr, ldb, beta, CPtr, ldc);
 }
 
 #else
@@ -597,7 +597,7 @@ void RowMax<float, lang::Cpp>(const size_t nrow, const size_t ncol,
   const float *inPtr = static_cast<const float *>(in->data());
   float *outPtr = static_cast<float *>(out->mutable_data());
   for (size_t r = 0; r < nrow; r++) {
-    int offset = r * ncol;
+    int offset = (int)(r * ncol);
     float maxval = inPtr[offset];
     for (size_t c = 1; c < ncol; c++)
       maxval = std::max(maxval, inPtr[offset + c]);

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/io/binfile_reader.cc
----------------------------------------------------------------------
diff --git a/src/io/binfile_reader.cc b/src/io/binfile_reader.cc
index 9b52a5d..9167451 100644
--- a/src/io/binfile_reader.cc
+++ b/src/io/binfile_reader.cc
@@ -125,7 +125,7 @@ bool BinFileReader::PrepareNextField(int size) {
       return false;
     } else {
       fdat_.read(buf_ + bufsize_, capacity_ - bufsize_);
-      bufsize_ += fdat_.gcount();
+      bufsize_ += (int) fdat_.gcount();
       CHECK_LE(size, bufsize_) << "Field size is too large: " << size;
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/io/binfile_writer.cc
----------------------------------------------------------------------
diff --git a/src/io/binfile_writer.cc b/src/io/binfile_writer.cc
index adc910e..1231c45 100644
--- a/src/io/binfile_writer.cc
+++ b/src/io/binfile_writer.cc
@@ -54,10 +54,10 @@ bool BinFileWriter::Write(const std::string& key, const std::string& value) {
   magic[3] = 0;
   if (key.size() == 0) {
     magic[2] = 0;
-    size = sizeof(magic) + sizeof(size_t) + value.size();
+    size = (int) (sizeof(magic) + sizeof(size_t) + value.size());
   } else {
     magic[2] = 1;
-    size = sizeof(magic) + 2 * sizeof(size_t) + key.size() + value.size();
+    size = (int) (sizeof(magic) + 2 * sizeof(size_t) + key.size() + value.size());
   }
 
   if (bufsize_ + size > capacity_) {
@@ -73,12 +73,12 @@ bool BinFileWriter::Write(const std::string& key, const std::string& value) {
     *reinterpret_cast<size_t*>(buf_ + bufsize_) = key.size();
     bufsize_ += sizeof(size_t);
     std::memcpy(buf_ + bufsize_, key.data(), key.size());
-    bufsize_ += key.size();
+    bufsize_ += (int) key.size();
   }
   *reinterpret_cast<size_t*>(buf_ + bufsize_) = value.size();
   bufsize_ += sizeof(size_t);
   std::memcpy(buf_ + bufsize_, value.data(), value.size());
-  bufsize_ += value.size();
+  bufsize_ += (int) value.size();
   return true;
 }
 

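A standalone sketch (not the actual BinFileWriter API; AppendRecord and the
magic bytes are illustrative) of the record layout the hunk above serializes
into buf_: a 4-byte magic, an optional size_t key length plus key bytes, then
a size_t value length plus value bytes:

    #include <cstddef>
    #include <string>
    #include <vector>

    void AppendRecord(std::vector<char>* buf, const char magic[4],
                      const std::string& key, const std::string& value) {
      buf->insert(buf->end(), magic, magic + 4);
      if (!key.empty()) {                        // magic[2] == 1 in the real writer
        std::size_t klen = key.size();
        const char* kp = reinterpret_cast<const char*>(&klen);
        buf->insert(buf->end(), kp, kp + sizeof(klen));
        buf->insert(buf->end(), key.begin(), key.end());
      }
      std::size_t vlen = value.size();
      const char* vp = reinterpret_cast<const char*>(&vlen);
      buf->insert(buf->end(), vp, vp + sizeof(vlen));
      buf->insert(buf->end(), value.begin(), value.end());
    }

    int main() {
      std::vector<char> buf;
      const char magic[4] = {'s', 'n', 1, 0};    // placeholder magic bytes
      AppendRecord(&buf, magic, "key0", "value0");
      return 0;
    }
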
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/model/feed_forward_net.cc
----------------------------------------------------------------------
diff --git a/src/model/feed_forward_net.cc b/src/model/feed_forward_net.cc
index c5c894e..ff781d5 100644
--- a/src/model/feed_forward_net.cc
+++ b/src/model/feed_forward_net.cc
@@ -146,7 +146,7 @@ void FeedForwardNet::Train(size_t batchsize, int nb_epoch, const Tensor& x,
                            const Tensor& y, const Tensor& val_x,
                            const Tensor& val_y) {
   CHECK_EQ(x.shape(0), y.shape(0)) << "Diff num of sampels in x and y";
-  int num_extra_samples = x.shape(0) % batchsize;
+  int num_extra_samples = (int)x.shape(0) % batchsize;
   if (num_extra_samples != 0)
     LOG(WARNING) << "Pls set batchsize to make num_total_samples "
                  << "% batchsize == 0. Otherwise, the last "
@@ -219,12 +219,12 @@ const vector<Tensor> FeedForwardNet::Backward(int flag, const Tensor& grad) {
   vector<Tensor> param_grads;
   std::stack<Tensor> buf;
   Tensor tmp = grad;
-  for (int i = layers_.size() - 1; i >= 0; i--) {
+  for (int i = (int)layers_.size() - 1; i >= 0; i--) {
     // LOG(INFO) << layers_.at(i)->name() << " : " << tmp.L1();
     auto ret = layers_.at(i)->Backward(flag, tmp);
     tmp = ret.first;
     if (ret.second.size()) {
-      for (int k = ret.second.size() - 1; k >= 0; k--) {
+      for (int k = (int)ret.second.size() - 1; k >= 0; k--) {
         buf.push(ret.second[k]);
         // LOG(INFO) <<  "      " << buf.top().L1();
       }
@@ -242,10 +242,10 @@ std::pair<Tensor, Tensor> FeedForwardNet::Evaluate(const Tensor& x,
                                                    size_t batchsize) {
   CHECK_EQ(x.shape(0), y.shape(0)) << "Diff num of sampels in x and y";
   CHECK_GE(x.shape(0), batchsize);
-  int num_extra_samples = x.shape(0) % batchsize;
+  int num_extra_samples = (int)x.shape(0) % batchsize;
   Tensor loss(Shape{x.shape(0)}), metric(Shape{x.shape(0)});
   for (size_t b = 0; b < x.shape(0) / batchsize; b++) {
-    int start = b * batchsize, end = start + batchsize;
+    int start = (int)(b * batchsize), end = (int)(start + batchsize);
     const Tensor bx = CopyRows(x, start, end);
     const Tensor by = CopyRows(y, start, end);
     const auto ret = EvaluateOnBatch(bx, by);
@@ -253,12 +253,12 @@ std::pair<Tensor, Tensor> FeedForwardNet::Evaluate(const Tensor& x,
     CopyDataToFrom(&metric, ret.second, batchsize, start, 0);
   }
   {
-    int start = x.shape(0) - batchsize, end = x.shape(0);
+    int start = (int)(x.shape(0) - batchsize), end = (int)x.shape(0);
     const Tensor bx = CopyRows(x, start, end);
     const Tensor by = CopyRows(y, start, end);
     const auto ret = EvaluateOnBatch(bx, by);
-    int dst_offset = x.shape(0) - num_extra_samples;
-    int src_offset = batchsize - num_extra_samples;
+    int dst_offset = (int)(x.shape(0) - num_extra_samples);
+    int src_offset = (int)(batchsize - num_extra_samples);
     CopyDataToFrom(&loss, ret.first, num_extra_samples, dst_offset, src_offset);
     CopyDataToFrom(&metric, ret.second, num_extra_samples, dst_offset,
                    src_offset);
@@ -277,17 +277,17 @@ std::pair<Tensor, Tensor> FeedForwardNet::EvaluateOnBatch(const Tensor& x,
 
 const Tensor FeedForwardNet::Predict(const Tensor& x, size_t batchsize) {
   CHECK_GE(x.shape(0), batchsize);
-  int num_extra_samples = x.shape(0) % batchsize;
+  int num_extra_samples = (int)(x.shape(0) % batchsize);
   const auto outshape = layers_.back()->GetOutputSampleShape();
   Tensor y(Shape{x.shape(0), Product(outshape)}, x.device());
   for (size_t b = 0; b < x.shape(0) / batchsize; b++) {
-    int start = b * batchsize, end = start + batchsize;
+    int start = (int)(b * batchsize), end = (int)(start + batchsize);
     const Tensor bx = CopyRows(x, start, end);
     CopyDataToFrom(&y, PredictOnBatch(bx), batchsize * y.shape(1),
                    start * y.shape(1), 0);
   }
   if (num_extra_samples > 0) {
-    int start = x.shape(0) - batchsize, end = x.shape(0);
+    int start = (int)(x.shape(0) - batchsize), end = (int)(x.shape(0));
     const Tensor bx = CopyRows(x, start, end);
     CopyDataToFrom(&y, PredictOnBatch(bx), num_extra_samples * y.shape(1),
                    (x.shape(0) - num_extra_samples) * y.shape(1),
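
A worked example of the tail-batch handling in Evaluate above (the numbers are
illustrative, not from the commit): with 1050 samples and batchsize 100,
num_extra_samples is 50; the loop evaluates ten full batches covering rows
0-999, and the final block re-runs the last 100 rows (950-1049) but copies only
results 50-99 of that batch (src_offset = 50) into output rows 1000-1049
(dst_offset = 1000), so the overlapping rows are not double-counted.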

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/model/layer/convolution.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/convolution.cc b/src/model/layer/convolution.cc
index bd7cc00..327d018 100644
--- a/src/model/layer/convolution.cc
+++ b/src/model/layer/convolution.cc
@@ -112,7 +112,7 @@ const Tensor Convolution::Forward(int flag, const Tensor &input) {
   auto in_data = input.data<float>();
   for (size_t b = 0; b < batchsize; b++) {
     Im2col(in_data + b * imagesize, channels_, height_, width_, kernel_h_,
-           kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data_col);
+        kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data_col);
     col_data.CopyDataFromHostPtr(data_col, col_height_ * col_width_);
     Tensor each = Mult(weight_, col_data);
     if (bias_term_) {
@@ -151,7 +151,7 @@ const std::pair<Tensor, vector<Tensor>> Convolution::Backward(
 
     SumRows(tmp3, &db);
   }
-  
+
   auto in_data = src_data.data<float>();
   Tensor col_data(Shape{col_height_, col_width_});
   float *data_col = new float[col_height_ * col_width_];
@@ -159,17 +159,15 @@ const std::pair<Tensor, vector<Tensor>> Convolution::Backward(
   for (size_t b = 0; b < batchsize; b++) {
     Im2col(in_data + b * imagesize, channels_, height_, width_, kernel_h_,
            kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data_col);
-    
+
     col_data.CopyDataFromHostPtr(data_col, col_height_ * col_width_);
     Tensor grad_b(Shape{num_filters_, conv_height_ * conv_width_});
     CopyDataToFrom(&grad_b, grad, grad_b.Size(), 0, b * grad_b.Size());
     dw += Mult(grad_b, col_data.T());
     Tensor dcol_b = Mult(weight_.T(), grad_b);
     auto dcol_data = dcol_b.data<float>();
-    
     Col2im(dcol_data, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_,
            pad_w_, stride_h_, stride_w_, dx_b);
-    
     dx.CopyDataFromHostPtr(dx_b, imagesize, b * imagesize);
   }
   param_grad.push_back(dw);

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/model/layer/pooling.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/pooling.cc b/src/model/layer/pooling.cc
index cf42891..2ddfc1c 100644
--- a/src/model/layer/pooling.cc
+++ b/src/model/layer/pooling.cc
@@ -108,14 +108,14 @@ const std::pair<Tensor, vector<Tensor>>
 Pooling::Backward(int flag, const Tensor& grad) {
   CHECK_EQ(grad.device()->lang(), kCpp);
   CHECK_EQ(grad.nDim(), 4u);
-  
+
   vector<Tensor> param_grad;
-  
+
   auto batchsize = grad.shape(0);
   auto dtype = grad.data_type();
   auto dev = grad.device();
   Shape shape{batchsize, channels_, height_, width_};
-  
+
   Tensor dx(shape, dev, dtype);
   auto gradptr = grad.data<float>();
   float* dxptr = new float[dx.Size()];
@@ -190,7 +190,7 @@ void Pooling::ForwardMaxPooling(const float* bottom, const int num,
 void Pooling::BackwardMaxPooling(const float* top, const float* mask,
                                  const int num, const int channels,
                                  const int height, const int width,
-                                 const int pooled_h, const int pooled_w, 
+                                 const int pooled_h, const int pooled_w,
                                  const int kernel_h, const int kernel_w,
                                  const int pad_h, const int pad_w,
                                  const int stride_h, const int stride_w,
@@ -215,7 +215,7 @@ void Pooling::BackwardMaxPooling(const float* top, const float* mask,
 }
 
 void Pooling::ForwardAvgPooling(const float* bottom, const int num,
-                                const int channels, 
+                                const int channels,
                                 const int height, const int width,
                                 const int pooled_h, const int pooled_w,
                                 const int kernel_h, const int kernel_w,
@@ -261,7 +261,7 @@ void Pooling::ForwardAvgPooling(const float* bottom, const int num,
 
 void Pooling::BackwardAvgPooling(const float* top, const int num,
                                  const int channels,
-                                 const int height, const int width, 
+                                 const int height, const int width,
                                  const int pooled_h, const int pooled_w,
                                  const int kernel_h, const int kernel_w,
                                  const int pad_h, const int pad_w,

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/37d3b39e/src/utils/logging.cc
----------------------------------------------------------------------
diff --git a/src/utils/logging.cc b/src/utils/logging.cc
index 50fa7dc..304d431 100644
--- a/src/utils/logging.cc
+++ b/src/utils/logging.cc
@@ -23,8 +23,11 @@
 
 #include <stdlib.h>
 #include <sys/types.h>
-//#include <unistd.h>
+#ifdef _MSC_VER
 #include <io.h>
+#else
+#include <unistd.h>
+#endif
 
 namespace singa {
 

