singa-commits mailing list archives

From: wang...@apache.org
Subject: [04/15] incubator-singa git commit: SINGA-290 Upgrade to Python 3
Date: Fri, 04 Aug 2017 08:32:48 GMT
SINGA-290 Upgrade to Python 3

Replace old_div with the corresponding / or // operator
Remove 'from future.standard_library import install_aliases' where it is not necessary
TODO: test on both py2 and py3 environments
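
For reference, here is a minimal, self-contained sketch of the two rewrite patterns that recur throughout this commit. The variable values are hypothetical stand-ins (loosely modelled on examples/char-rnn/train.py); only the patterns themselves come from the diff below.

    from __future__ import print_function   # harmless on py3, needed on py2

    # Pattern 1: drop old_div from past.utils in favour of the plain
    # division operators: // where an integer result is required,
    # / where true (float) division is wanted.
    data = list(range(1010))   # hypothetical values, for illustration only
    seq_length = 100
    epoch = 120

    # was: nsamples = old_div(len(data), (1 + seq_length))
    nsamples = len(data) // (1 + seq_length)    # 10, an int on py2 and py3

    # was: lr = old_div(0.001, float(1 << (old_div(epoch, 50))))
    lr = 0.001 / float(1 << (epoch // 50))      # 0.00025 on py2 and py3

    # Pattern 2: instead of future.standard_library.install_aliases(),
    # import pickle directly and fall back to cPickle on Python 2.
    try:
        import pickle
    except ImportError:
        import cPickle as pickle

    print(nsamples, lr)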


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/01b7a74b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/01b7a74b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/01b7a74b

Branch: refs/heads/master
Commit: 01b7a74bafb782fb37bd86cafe5ab9e6000f442d
Parents: ff1806b
Author: Wei Wang <wangwei@comp.nus.edu.sg>
Authored: Fri Jul 14 21:58:20 2017 +0800
Committer: Wei Wang <wangwei@comp.nus.edu.sg>
Committed: Thu Aug 3 18:16:00 2017 +0800

----------------------------------------------------------------------
 examples/caffe/predict.py         |  20 +--
 examples/char-rnn/sample.py       |  21 ++-
 examples/char-rnn/train.py        |  43 +++---
 examples/cifar10/predict.py       |   9 +-
 examples/cifar10/resnet.py        |   8 --
 examples/cifar10/train.py         |  25 ++--
 examples/imagenet/resnet/serve.py |  43 +++---
 examples/mnist/train.py           |  41 +++---
 python/rafiki/agent.py            |  48 +++----
 python/setup.py.in                |   7 +-
 python/singa/command.py           | 244 ---------------------------------
 python/singa/image_tool.py        |  11 +-
 python/singa/initializer.py       |   8 +-
 python/singa/layer.py             |   1 +
 python/singa/loss.py              |  11 +-
 python/singa/metric.py            |  28 ++--
 python/singa/net.py               |  11 +-
 python/singa/optimizer.py         |  17 ++-
 python/singa/tensor.py            |   5 +-
 test/python/test_loss.py          |   6 +-
 test/python/test_metric.py        |  32 ++---
 test/python/test_net.py           |   7 +-
 test/python/test_optimizer.py     |  14 +-
 test/python/test_tensor.py        |   5 +-
 tool/opencl/clsrc_to_str.py       |  10 +-
 25 files changed, 203 insertions(+), 472 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/caffe/predict.py
----------------------------------------------------------------------
diff --git a/examples/caffe/predict.py b/examples/caffe/predict.py
index 62e6a86..0a93b1c 100644
--- a/examples/caffe/predict.py
+++ b/examples/caffe/predict.py
@@ -1,6 +1,3 @@
-from __future__ import print_function
-from builtins import input
-from builtins import range
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -17,6 +14,10 @@ from builtins import range
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
+from __future__ import print_function
+from builtins import input
+from builtins import range
+
 import numpy as np
 import os
 import argparse
@@ -25,11 +26,10 @@ from PIL import Image
 from singa import device
 from singa import tensor
 from singa import converter
-from singa import layer
-from singa import net
 
-#for debug: print norm of each layer
-#net.verbose = True
+
+# for debug: print norm of each layer
+# net.verbose = True
 
 
 def convert_model(prototxt, caffemodel):
@@ -47,7 +47,7 @@ def read_image(img_path):
     # According to the VGG paper(Very Deep Convolutional Networks for
     # Large-Scale Image Recognition), the input images are zero-centered by
     # mean pixel(rather than mean image) substraction.
-    mean_RGB =[123.68, 116.779, 103.939]
+    mean_RGB = [123.68, 116.779, 103.939]
 
     img = Image.open(img_path)
     img = img.convert('RGB')
@@ -79,7 +79,7 @@ def predict(net, dev, synset_list, topk=5):
             print('Path is invalid')
             continue
         img = read_image(img_path)
-        x = tensor.from_numpy(img.astype(np.float32)[np.newaxis,:])
+        x = tensor.from_numpy(img.astype(np.float32)[np.newaxis, :])
         x.to_device(dev)
         y = net.predict(x)
         y.to_host()
@@ -91,7 +91,7 @@ def predict(net, dev, synset_list, topk=5):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         description='Convert caffe vgg into singa. \
-            This tool only supports caffe model in current version(29-Nov-2016). \
+            This tool only supports caffe model from version as 29-Nov-2016. \
             You can use caffe tool to update previous model')
     parser.add_argument('model_txt', default='./vgg16.prototxt')
     parser.add_argument('model_bin', default='./vgg16.caffemodel')
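
A side note on the hunk above (the same move appears in most files below): relocating the __future__ and builtins imports to after the Apache license block is safe, because a future statement only has to precede the first real statement of a module; docstrings, comments, and blank lines may come before it. A minimal skeleton of the resulting layout, with hypothetical names:

    # Licensed to the Apache Software Foundation (ASF) under one
    # ... (full license comment block; comments and blank lines are
    # allowed before __future__ imports) ...
    from __future__ import print_function
    from builtins import range

    def count_to(n):
        # print() and range() behave the py3 way on both interpreters
        for i in range(n):
            print(i)

    if __name__ == '__main__':
        count_to(3)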

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/char-rnn/sample.py
----------------------------------------------------------------------
diff --git a/examples/char-rnn/sample.py b/examples/char-rnn/sample.py
index 5b0b66a..eb745cf 100644
--- a/examples/char-rnn/sample.py
+++ b/examples/char-rnn/sample.py
@@ -15,22 +15,21 @@
 # limitations under the License.
 # =============================================================================
 '''Sample characters from the pre-trained model'''
+
 from __future__ import division
 from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
 from builtins import range
-from past.utils import old_div
 import sys
-import pickle as pickle
 import numpy as np
 import argparse
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
 
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
 from singa import layer
 from singa import tensor
 from singa import device
-from singa.proto import model_pb2
 
 
 def sample(model_path, nsamples=100, seed_text='', do_sample=True):
@@ -67,15 +66,15 @@ def sample(model_path, nsamples=100, seed_text='', do_sample=True):
             tx = tensor.from_numpy(x)
             tx.to_device(cuda)
             inputs = [tx, hx, cx]
-            outputs = rnn.forward(model_pb2.kEval, inputs)
-            y = dense.forward(model_pb2.kEval, outputs[0])
+            outputs = rnn.forward(False, inputs)
+            y = dense.forward(False, outputs[0])
             y = tensor.softmax(y)
             hx = outputs[1]
             cx = outputs[2]
         sys.stdout.write(seed_text)
     else:
         y = tensor.Tensor((1, vocab_size), cuda)
-        y.set_value(old_div(1.0, vocab_size))
+        y.set_value(1.0 / vocab_size)
 
     for i in range(nsamples):
         y.to_host()
@@ -90,8 +89,8 @@ def sample(model_path, nsamples=100, seed_text='', do_sample=True):
         tx = tensor.from_numpy(x)
         tx.to_device(cuda)
         inputs = [tx, hx, cx]
-        outputs = rnn.forward(model_pb2.kEval, inputs)
-        y = dense.forward(model_pb2.kEval, outputs[0])
+        outputs = rnn.forward(False, inputs)
+        y = dense.forward(False, outputs[0])
         y = tensor.softmax(y)
         hx = outputs[1]
         cx = outputs[2]

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/char-rnn/train.py
----------------------------------------------------------------------
diff --git a/examples/char-rnn/train.py b/examples/char-rnn/train.py
index 0eeeb35..240e574 100644
--- a/examples/char-rnn/train.py
+++ b/examples/char-rnn/train.py
@@ -19,31 +19,27 @@ The model is created following https://github.com/karpathy/char-rnn
 The train file could be any text file,
 e.g., http://cs.stanford.edu/people/karpathy/char-rnn/
 '''
+
+
 from __future__ import division
 from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
 from builtins import zip
 from builtins import range
 from builtins import object
-from past.utils import old_div
 import pickle as pickle
 import numpy as np
 import argparse
 
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
 from singa import layer
 from singa import loss
 from singa import device
 from singa import tensor
 from singa import optimizer
 from singa import initializer
-from singa.proto import model_pb2
 from singa import utils
 
 
 class Data(object):
-
     def __init__(self, fpath, batch_size=32, seq_length=100, train_ratio=0.8):
         '''Data object for loading a plain text file.
 
@@ -59,16 +55,16 @@ class Data(object):
         self.idx_to_char = {i: ch for i, ch in enumerate(chars)}
         data = [self.char_to_idx[c] for c in self.raw_data]
         # seq_length + 1 for the data + label
-        nsamples = old_div(len(data), (1 + seq_length))
+        nsamples = len(data) // (1 + seq_length)
         data = data[0:nsamples * (1 + seq_length)]
         data = np.asarray(data, dtype=np.int32)
         data = np.reshape(data, (-1, seq_length + 1))
         # shuffle all sequences
         np.random.shuffle(data)
         self.train_dat = data[0:int(data.shape[0]*train_ratio)]
-        self.num_train_batch = old_div(self.train_dat.shape[0], batch_size)
+        self.num_train_batch = self.train_dat.shape[0] // batch_size
         self.val_dat = data[self.train_dat.shape[0]:]
-        self.num_test_batch = old_div(self.val_dat.shape[0], batch_size)
+        self.num_test_batch = self.val_dat.shape[0] // batch_size
         print('train dat', self.train_dat.shape)
         print('val dat', self.val_dat.shape)
 
@@ -102,7 +98,7 @@ def convert(batch, batch_size, seq_length, vocab_size, dev):
 
 
 def get_lr(epoch):
-    return old_div(0.001, float(1 << (old_div(epoch, 50))))
+    return 0.001 / float(1 << (epoch // 50))
 
 
 def train(data, max_epoch, hidden_size=100, seq_length=100, batch_size=16,
@@ -152,30 +148,30 @@ def train(data, max_epoch, hidden_size=100, seq_length=100, batch_size=16,
             inputs.append(tensor.Tensor())
             inputs.append(tensor.Tensor())
 
-            outputs = rnn.forward(model_pb2.kTrain, inputs)[0:-2]
+            outputs = rnn.forward(True, inputs)[0:-2]
             grads = []
             batch_loss = 0
             g_dense_w.set_value(0.0)
             g_dense_b.set_value(0.0)
             for output, label in zip(outputs, labels):
-                act = dense.forward(model_pb2.kTrain, output)
-                lvalue = lossfun.forward(model_pb2.kTrain, act, label)
+                act = dense.forward(True, output)
+                lvalue = lossfun.forward(True, act, label)
                 batch_loss += lvalue.l1()
                 grad = lossfun.backward()
                 grad /= batch_size
-                grad, gwb = dense.backward(model_pb2.kTrain, grad)
+                grad, gwb = dense.backward(True, grad)
                 grads.append(grad)
                 g_dense_w += gwb[0]
                 g_dense_b += gwb[1]
                 # print output.l1(), act.l1()
             utils.update_progress(
                 b * 1.0 / data.num_train_batch, 'training loss = %f' %
-                (old_div(batch_loss, seq_length)))
+                (batch_loss / seq_length))
             train_loss += batch_loss
 
             grads.append(tensor.Tensor())
             grads.append(tensor.Tensor())
-            g_rnn_w = rnn.backward(model_pb2.kTrain, grads)[1][0]
+            g_rnn_w = rnn.backward(True, grads)[1][0]
             dense_w, dense_b = dense.param_values()
             opt.apply_with_lr(epoch, get_lr(epoch), g_rnn_w, rnn_w, 'rnnw')
             opt.apply_with_lr(
@@ -184,8 +180,8 @@ def train(data, max_epoch, hidden_size=100, seq_length=100, batch_size=16,
             opt.apply_with_lr(
                 epoch, get_lr(epoch),
                 g_dense_b, dense_b, 'dense_b')
-        print('\nEpoch %d, train loss is %f' % \
-            (epoch, train_loss / data.num_train_batch / seq_length))
+        print('\nEpoch %d, train loss is %f' %
+              (epoch, train_loss / data.num_train_batch / seq_length))
 
         eval_loss = 0
         for b in range(data.num_test_batch):
@@ -194,13 +190,12 @@ def train(data, max_epoch, hidden_size=100, seq_length=100, batch_size=16,
                                      data.vocab_size, cuda)
             inputs.append(tensor.Tensor())
             inputs.append(tensor.Tensor())
-            outputs = rnn.forward(model_pb2.kEval, inputs)[0:-2]
+            outputs = rnn.forward(False, inputs)[0:-2]
             for output, label in zip(outputs, labels):
-                output = dense.forward(model_pb2.kEval, output)
-                eval_loss += lossfun.forward(model_pb2.kEval,
-                                             output, label).l1()
-        print('Epoch %d, evaluation loss is %f' % \
-            (epoch, eval_loss / data.num_test_batch / seq_length))
+                output = dense.forward(True, output)
+                eval_loss += lossfun.forward(True, output, label).l1()
+        print('Epoch %d, evaluation loss is %f' %
+              (epoch, eval_loss / data.num_test_batch / seq_length))
 
         if (epoch + 1) % 30 == 0:
             # checkpoint the file model
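
One further change visible in the sample.py hunks above and in this file (not called out in the commit message): the layer and loss forward/backward calls now take a boolean training flag in place of the model_pb2 phase enum. A comment-only sketch of the mapping, using the objects from these examples:

    # old (proto phase enum)                    new (bool, True = training)
    # rnn.forward(model_pb2.kTrain, inputs)  -> rnn.forward(True, inputs)
    # rnn.forward(model_pb2.kEval, inputs)   -> rnn.forward(False, inputs)
    # dense.forward(model_pb2.kEval, out)    -> dense.forward(False, out)
    # lossfun.forward(model_pb2.kTrain, a, y)-> lossfun.forward(True, a, y)
    # rnn.backward(model_pb2.kTrain, grads)  -> rnn.backward(True, grads)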

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/cifar10/predict.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/predict.py b/examples/cifar10/predict.py
index 7cab4b9..1432e12 100644
--- a/examples/cifar10/predict.py
+++ b/examples/cifar10/predict.py
@@ -16,14 +16,13 @@
 # =============================================================================
 '''Predicting the labels for new images using the pre-trained alexnet model'''
 from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
 from builtins import range
-import pickle as pickle
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
 import numpy as np
 
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
-
 from singa import device
 from singa import tensor
 import alexnet

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/cifar10/resnet.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/resnet.py b/examples/cifar10/resnet.py
index 4b9bad0..fa4d1aa 100644
--- a/examples/cifar10/resnet.py
+++ b/examples/cifar10/resnet.py
@@ -20,16 +20,8 @@ The performance could be improved by tuning some hyper-parameters, including
 learning rate, weight decay, max_epoch, parameter initialization, etc.
 """
 from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
 from builtins import zip
 
-import pickle as pickle
-
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
-# use the python modules by installing py singa in build/python
-# pip install -e .
-
 from singa import layer
 from singa import initializer
 from singa import metric

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/cifar10/train.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/train.py b/examples/cifar10/train.py
index 9f90e58..9f1ffd2 100644
--- a/examples/cifar10/train.py
+++ b/examples/cifar10/train.py
@@ -20,14 +20,13 @@ includes 1 label & 3072 pixels.  3072 pixels are 3 channels of a 32x32 image
 """
 from __future__ import division
 from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
 from builtins import zip
 from builtins import str
 from builtins import range
-from past.utils import old_div
-
-import pickle
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
 import numpy as np
 import os
 import argparse
@@ -93,7 +92,7 @@ def normalize_for_alexnet(train_x, test_x):
 
 
 def vgg_lr(epoch):
-    return old_div(0.1, float(1 << ((old_div(epoch, 25)))))
+    return 0.1 / float(1 << (epoch // 25))
 
 
 def alexnet_lr(epoch):
@@ -139,8 +138,8 @@ def train(data, net, max_epoch, get_lr, weight_decay, batch_size=100,
     tx = tensor.Tensor((batch_size, 3, 32, 32), dev)
     ty = tensor.Tensor((batch_size,), dev, core_pb2.kInt)
     train_x, train_y, test_x, test_y = data
-    num_train_batch = old_div(train_x.shape[0], batch_size)
-    num_test_batch = old_div(test_x.shape[0], batch_size)
+    num_train_batch = train_x.shape[0] // batch_size
+    num_test_batch = test_x.shape[0] // batch_size
     idx = np.arange(train_x.shape[0], dtype=np.int32)
     for epoch in range(max_epoch):
         np.random.shuffle(idx)
@@ -160,7 +159,7 @@ def train(data, net, max_epoch, get_lr, weight_decay, batch_size=100,
             utils.update_progress(b * 1.0 / num_train_batch,
                                   'training loss = %f, accuracy = %f' % (l, a))
         info = '\ntraining loss = %f, training accuracy = %f, lr = %f' \
-            % (old_div(loss, num_train_batch), old_div(acc, num_train_batch), get_lr(epoch))
+            % ((loss / num_train_batch), (acc / num_train_batch), get_lr(epoch))
         print(info)
 
         loss, acc = 0.0, 0.0
@@ -173,14 +172,14 @@ def train(data, net, max_epoch, get_lr, weight_decay, batch_size=100,
             loss += l
             acc += a
 
-        print('test loss = %f, test accuracy = %f' \
-            % (old_div(loss, num_test_batch), old_div(acc, num_test_batch)))
+        print('test loss = %f, test accuracy = %f' %
+              ((loss / num_test_batch), (acc / num_test_batch)))
     net.save('model', 20)  # save model params into checkpoint file
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Train dcnn for cifar10')
     parser.add_argument('model', choices=['vgg', 'alexnet', 'resnet', 'caffe'],
-            default='alexnet')
+                        default='alexnet')
     parser.add_argument('data', default='cifar-10-batches-py')
     parser.add_argument('--use_cpu', action='store_true')
     args = parser.parse_args()
@@ -196,7 +195,7 @@ if __name__ == '__main__':
         train((train_x, train_y, test_x, test_y), net, 160, alexnet_lr, 0.004,
               use_cpu=args.use_cpu)
         # for cifar10_quick_train_test.prototxt
-        #train((train_x, train_y, test_x, test_y), net, 18, caffe_lr, 0.004,
+        # train((train_x, train_y, test_x, test_y), net, 18, caffe_lr, 0.004,
         #      use_cpu=args.use_cpu)
     elif args.model == 'alexnet':
         train_x, test_x = normalize_for_alexnet(train_x, test_x)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/imagenet/resnet/serve.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/resnet/serve.py b/examples/imagenet/resnet/serve.py
index fde06fe..4c7d897 100644
--- a/examples/imagenet/resnet/serve.py
+++ b/examples/imagenet/resnet/serve.py
@@ -1,8 +1,3 @@
-from __future__ import division
-from __future__ import print_function
-from builtins import str
-from builtins import range
-from past.utils import old_div
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -19,20 +14,20 @@ from past.utils import old_div
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
+from __future__ import division
+from __future__ import print_function
+from builtins import str
+from builtins import range
+
 import sys
 import time
 import numpy as np
-import threading
 import traceback
-from scipy.misc import imread, imresize
 from argparse import ArgumentParser
 
 from singa import device
 from singa import tensor
-from singa import data
 from singa import image_tool
-from singa import metric
 from rafiki.agent import Agent, MsgType
 import model
 
@@ -40,11 +35,14 @@ tool = image_tool.ImageTool()
 num_augmentation = 10
 crop_size = 224
 mean = np.array([0.485, 0.456, 0.406])
-std = np.array([ 0.229, 0.224, 0.225])
+std = np.array([0.229, 0.224, 0.225])
+
+
 def image_transform(img):
     '''Input an image path and return a set of augmented images (type Image)'''
     global tool
-    return tool.load(img).resize_by_list([256]).crop5((crop_size, crop_size), 5).flip(2).get()
+    return tool.load(img).resize_by_list([256]).crop5((crop_size, crop_size),
+                                                      5).flip(2).get()
 
 
 def predict(net, images, num=10):
@@ -57,7 +55,7 @@ def predict(net, images, num=10):
     '''
     prob = net.predict(images)
     prob = tensor.to_numpy(prob)
-    prob = prob.reshape((old_div(images.shape[0], num), num, -1))
+    prob = prob.reshape(((images.shape[0] // num), num, -1))
     prob = np.average(prob, 1)
     return prob
 
@@ -76,7 +74,7 @@ def serve(net, label_map, dev, agent, topk=5):
         label_map: a list of food names, corresponding to the index in meta_file
     '''
 
-    images =tensor.Tensor((num_augmentation, 3, crop_size, crop_size), dev)
+    images = tensor.Tensor((num_augmentation, 3, crop_size, crop_size), dev)
     while True:
         msg, val = agent.pull()
         if msg is None:
@@ -86,8 +84,10 @@ def serve(net, label_map, dev, agent, topk=5):
         if msg.is_request():
             try:
                 # process images
-                im = [np.array(x.convert('RGB'), dtype=np.float32).transpose(2, 0, 1) for x in image_transform(val['image'])]
-                im = old_div(np.array(im), 256)
+                im = [np.array(x.convert('RGB'),
+                               dtype=np.float32).transpose(2, 0, 1)
+                      for x in image_transform(val['image'])]
+                im = np.array(im) / 255
                 im -= mean[np.newaxis, :, np.newaxis, np.newaxis]
                 im /= std[np.newaxis, :, np.newaxis, np.newaxis]
                 images.copy_from_numpy(im)
@@ -98,7 +98,8 @@ def serve(net, label_map, dev, agent, topk=5):
                 # prepare results
                 response = ""
                 for i in range(topk):
-                    response += "%s:%f <br/>" % (label_map[idx[i]], prob[idx[i]])
+                    response += "%s:%f <br/>" % (label_map[idx[i]],
+                                                 prob[idx[i]])
             except:
                 traceback.print_exc()
                 response = "sorry, system error during prediction."
@@ -117,6 +118,7 @@ def serve(net, label_map, dev, agent, topk=5):
             break
     print("server stop")
 
+
 def main():
     try:
         # Setup argument parser
@@ -126,8 +128,11 @@ def main():
         parser.add_argument("--use_cpu", action="store_true",
                             help="If set, load models onto CPU devices")
         parser.add_argument("--parameter_file", default="wrn-50-2.pickle")
-        parser.add_argument("--model", choices = ['resnet', 'wrn', 'preact', 'addbn'], default='wrn')
-        parser.add_argument("--depth", type=int, choices = [18, 34, 50, 101, 152, 200], default='50')
+        parser.add_argument("--model", choices=['resnet', 'wrn', 'preact',
+                                                'addbn'], default='wrn')
+        parser.add_argument("--depth", type=int, choices=[18, 34, 50, 101,
+                                                          152, 200],
+                            default='50')
 
         # Process arguments
         args = parser.parse_args()

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/examples/mnist/train.py
----------------------------------------------------------------------
diff --git a/examples/mnist/train.py b/examples/mnist/train.py
index 82d9a5a..88c9a44 100644
--- a/examples/mnist/train.py
+++ b/examples/mnist/train.py
@@ -1,9 +1,3 @@
-from __future__ import division
-from __future__ import print_function
-from future import standard_library
-standard_library.install_aliases()
-from builtins import range
-from past.utils import old_div
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -20,23 +14,25 @@ from past.utils import old_div
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
+from __future__ import division
+from __future__ import print_function
+from builtins import range
 
 import numpy as np
 import os
 import gzip
 import argparse
-import pickle
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
+
 from singa import initializer
-from singa import utils
 from singa import optimizer
 from singa import device
 from singa import tensor
 
 
-from singa.proto import core_pb2
-
-
-
 def load_train_data(file_path):
     f = gzip.open(file_path, 'rb')
     train_set, valid_set, test_set = pickle.load(f)
@@ -46,19 +42,18 @@ def load_train_data(file_path):
     return traindata, validdata
 
 
-
 def train(data_file, use_gpu, num_epoch=10, batch_size=100):
     print('Start intialization............')
     lr = 0.1   # Learning rate
-    weight_decay  = 0.0002
+    weight_decay = 0.0002
     hdim = 1000
     vdim = 784
     opt = optimizer.SGD(momentum=0.8, weight_decay=weight_decay)
 
     tweight = tensor.Tensor((vdim, hdim))
     tweight.gaussian(0.0, 0.1)
-    tvbias = tensor.from_numpy(np.zeros(vdim, dtype = np.float32))
-    thbias = tensor.from_numpy(np.zeros(hdim, dtype = np.float32))
+    tvbias = tensor.from_numpy(np.zeros(vdim, dtype=np.float32))
+    thbias = tensor.from_numpy(np.zeros(hdim, dtype=np.float32))
     opt = optimizer.SGD(momentum=0.5, weight_decay=weight_decay)
 
     print('Loading data ..................')
@@ -72,7 +67,7 @@ def train(data_file, use_gpu, num_epoch=10, batch_size=100):
     for t in [tweight, tvbias, thbias]:
         t.to_device(dev)
 
-    num_train_batch = old_div(train_x.shape[0], batch_size)
+    num_train_batch = train_x.shape[0] // batch_size
     print("num_train_batch = %d " % (num_train_batch))
     for epoch in range(num_epoch):
         trainerrorsum = 0.0
@@ -80,7 +75,7 @@ def train(data_file, use_gpu, num_epoch=10, batch_size=100):
         for b in range(num_train_batch):
             # positive phase
             tdata = tensor.from_numpy(
-                    train_x[(b * batch_size):((b + 1) * batch_size), : ])
+                    train_x[(b * batch_size):((b + 1) * batch_size), :])
             tdata.to_device(dev)
             tposhidprob = tensor.mult(tdata, tweight)
             tposhidprob.add_row(thbias)
@@ -100,14 +95,14 @@ def train(data_file, use_gpu, num_epoch=10, batch_size=100):
             error = tensor.sum(tensor.square((tdata - tnegdata)))
             trainerrorsum = error + trainerrorsum
 
-            tgweight = tensor.mult(tnegdata.T(), tneghidprob) -\
-                    tensor.mult(tdata.T(), tposhidprob)
+            tgweight = tensor.mult(tnegdata.T(), tneghidprob) \
+                - tensor.mult(tdata.T(), tposhidprob)
             tgvbias = tensor.sum(tnegdata, 0) - tensor.sum(tdata, 0)
             tghbias = tensor.sum(tneghidprob, 0) - tensor.sum(tposhidprob, 0)
 
-            opt.apply_with_lr(epoch, old_div(lr, batch_size), tgweight, tweight, 'w')
-            opt.apply_with_lr(epoch, old_div(lr, batch_size), tgvbias, tvbias, 'vb')
-            opt.apply_with_lr(epoch, old_div(lr, batch_size), tghbias, thbias, 'hb')
+            opt.apply_with_lr(epoch, lr / batch_size, tgweight, tweight, 'w')
+            opt.apply_with_lr(epoch, lr / batch_size, tgvbias, tvbias, 'vb')
+            opt.apply_with_lr(epoch, lr / batch_size, tghbias, thbias, 'hb')
 
         print('training errorsum = %f' % (trainerrorsum))
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/rafiki/agent.py
----------------------------------------------------------------------
diff --git a/python/rafiki/agent.py b/python/rafiki/agent.py
index 98d9b01..5dc820d 100644
--- a/python/rafiki/agent.py
+++ b/python/rafiki/agent.py
@@ -1,5 +1,3 @@
-from builtins import str
-from builtins import object
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -17,6 +15,9 @@ from builtins import object
 # specific language governing permissions and limitations
 # under the License.
 # =============================================================================
+from builtins import str
+from builtins import object
+
 from multiprocessing import Process, Queue
 from flask import Flask,request, send_from_directory, jsonify
 from flask_cors import CORS, cross_origin
@@ -38,15 +39,15 @@ class MsgType(object):
        return str(self) == str(target)
 
    def is_info(self):
-       return self.name.startswith('kInfo') 
+       return self.name.startswith('kInfo')
    def is_command(self):
-       return self.name.startswith('kCommand') 
+       return self.name.startswith('kCommand')
    def is_status(self):
-       return self.name.startswith('kStatus') 
+       return self.name.startswith('kStatus')
    def is_request(self):
-       return self.name.startswith('kRequest') 
+       return self.name.startswith('kRequest')
    def is_response(self):
-       return self.name.startswith('kResponse') 
+       return self.name.startswith('kResponse')
 
    @staticmethod
    def parse(name):
@@ -59,7 +60,7 @@ class MsgType(object):
            return MsgType.kCommandPause
        if name=='resume':
            return MsgType.kCommandResume
-       return MsgType.kCommand 
+       return MsgType.kCommand
 
 types =  ['kInfo','kInfoMetric',
            'kCommand','kCommandStop','kCommandPause','kCommandResume',
@@ -91,7 +92,7 @@ class Agent(object):
             if msg.is_request():
                 data = pickle.loads(data)
             return msg,data
-        return None,None 
+        return None,None
 
     def push(self,msg,value):
         self.info_queue.put((msg,value))
@@ -165,10 +166,10 @@ def getTopKData():
         return failure("Internal Error")
     return success(data_[-k:])
 
-@app.route("/api", methods=['POST'])                                                                  
-@cross_origin()                                                                                           
+@app.route("/api", methods=['POST'])
+@cross_origin()
 def api():
-    global info_queue_,command_queue_ 
+    global info_queue_,command_queue_
     try:
         files=transformFile(request.files)
         values = CombinedMultiDict([request.args,request.form,files])
@@ -177,20 +178,20 @@ def api():
         msg,response=getDataFromInfoQueue(True)
         deleteFiles(files)
         return response
-    except:                                                                            
+    except:
         traceback.print_exc()
         return failure("Internal Error")
 
-@app.route("/command/<name>", methods=['GET','POST'])                                                                  
-@cross_origin()                                                                                           
+@app.route("/command/<name>", methods=['GET','POST'])
+@cross_origin()
 def command(name):
-    global info_queue_,command_queue_ 
+    global info_queue_,command_queue_
     try:
         command=MsgType.get_command(name)
         command_queue_.put((command,""))
         msg,response=getDataFromInfoQueue(True)
         return response
-    except:                                                                            
+    except:
         traceback.print_exc()
         return failure("Internal Error")
 
@@ -203,19 +204,20 @@ def failure(message):
     res = dict(result="message", message=message)
     return jsonify(res)
 
+
 def transformFile(files):
     result= MultiDict([])
-    for f in list(files.keys()):
+    for f in files:
         file = files[f]
         unique_filename = str(uuid.uuid4())+secure_filename(file.filename)
-        filepath=os.path.join(os.getcwd(),unique_filename)
+        filepath=os.path.join(os.getcwd(), unique_filename)
         file.save(filepath)
-        result.add(f,filepath)
+        result.add(f, filepath)
     return result
 
+
 def deleteFiles(files):
-    for f in list(files.keys()):
-        filepath = files[f]    
+    for f in files:
+        filepath = files[f]
         os.remove(filepath)
-        #print "remove",filepath
     return

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/setup.py.in
----------------------------------------------------------------------
diff --git a/python/setup.py.in b/python/setup.py.in
index f667ae5..7a9f2c6 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -67,10 +67,9 @@ setup(
         'flask_cors>=3.0.2',
         'pillow>=2.3.0',
         'future',
-	'xmlrunner',
-	'tqdm',
-	'ipywidgets',
-	'matplotlib',
+        'xmlrunner',
+        'tqdm',
+        'ipywidgets',
         ],
 
     #List additional groups of dependencies here (e.g. development

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/command.py
----------------------------------------------------------------------
diff --git a/python/singa/command.py b/python/singa/command.py
deleted file mode 100644
index 90c51cc..0000000
--- a/python/singa/command.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# =============================================================================
-
-'''
-This script is the main entrance for user to run singa inside a model workspace
-
-To use this script, user sudo install these dependencies: flask pillow and protobuf
-'''
-from __future__ import print_function
-
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-import sys, glob, os, random, shutil, time
-from flask import Flask, request, redirect, url_for
-import numpy as np
-import configparser
-import urllib.request, urllib.parse, urllib.error, traceback
-
-
-from argparse import ArgumentParser
-from argparse import RawDescriptionHelpFormatter
-sys.path.append(os.getcwd())
-
-__all__ = []
-__version__ = 0.1
-__date__ = '2016-07-20'
-__updated__ = '2016-07-20'
-__shortdesc__ = '''
-welcome to singa
-'''
-
-app = Flask(__name__)
-config = configparser.RawConfigParser()
-service = {}
-data_path = "data_"
-parameter_path = "parameter_"
-
-debug = False
-
-class CLIError(Exception):
-    '''Generic exception to raise and log different fatal errors.'''
-    def __init__(self, msg):
-        super(CLIError).__init__(type(self))
-        self.msg = "E: %s" % msg
-    def __str__(self):
-        return self.msg
-    def __unicode__(self):
-        return self.msg
-
-def main(argv=None): # IGNORE:C0111
-    '''Command line options.'''
-
-    from . import device
-
-    if argv is None:
-        argv = sys.argv
-    else:
-        sys.argv.extend(argv)
-
-    program_name = os.path.basename(sys.argv[0])
-    program_version = "v%s" % __version__
-    program_build_date = str(__updated__)
-    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
-    program_shortdesc = __shortdesc__
-    program_license = '''%s
-
-  Created by dbsystem group on %s.
-  Copyright 2016 NUS School of Computing. All rights reserved.
-
-  Licensed under the Apache License 2.0
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Distributed on an "AS IS" basis without warranties
-  or conditions of any kind, either express or implied.
-
-USAGE
-''' % (program_shortdesc, str(__date__))
-
-    global debug
-
-    try:
-        # Setup argument parser
-        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
-        parser.add_argument("-p", "--port", dest="port", default=5000, help="the port to listen to, default is 5000")
-        parser.add_argument("-param", "--parameter", dest="parameter",  help="the parameter file path to be loaded")
-        parser.add_argument("-D", "--debug", dest="debug", action="store_true", help="whether need to debug")
-        parser.add_argument("-R", "--reload", dest="reload_data", action="store_true", help="whether need to reload data")
-        parser.add_argument("-C", "--cpu", dest="use_cpu", action="store_true", help="Using cpu or not, default is using gpu")
-        parser.add_argument("-m", "--mode", dest="mode", choices=['train','test','serve'], default='serve', help="On Which mode (train,test,serve) to run singa")
-        parser.add_argument('-V', '--version', action='version', version=program_version_message)
-
-        # Process arguments
-        args = parser.parse_args()
-
-        port = args.port
-        parameter_file = args.parameter
-        mode = args.mode
-        need_reload = args.reload_data
-        use_cpu = args.use_cpu
-        debug = args.debug
-
-        #prepare data files
-        config.read('file.cfg')
-        file_prepare(need_reload)
-
-
-        import network as net
-        model = net.create()
-
-        #load parameter
-        parameter_file=get_parameter(parameter_file)
-
-        if parameter_file:
-            print("load parameter file: %s" % parameter_file)
-            model.load(parameter_file)
-
-        if use_cpu:
-            raise CLIError("Currently cpu is not support!")
-        else:
-            print("runing with gpu")
-            d = device.create_cuda_gpu()
-
-        model.to_device(d)
-
-        if mode == "serve":
-            print("runing singa in serve mode, listen to  port: %s " % port)
-            global service
-            from serve import Service
-            service =Service(model,d)
-
-            app.debug = debug
-            app.run(host='0.0.0.0', port= port)
-        elif mode == "train":
-            print("runing singa in train mode")
-            global trainer
-            from train import Trainer
-            trainer= Trainer(model,d)
-            if not parameter_file:
-                trainer.initialize()
-            trainer.train()
-        else:
-            raise CLIError("Currently only serve mode is surpported!")
-        return 0
-    except KeyboardInterrupt:
-        ### handle keyboard interrupt ###
-        return 0
-    except Exception as e:
-        if debug:
-            traceback.print_exc()
-            raise(e)
-        indent = len(program_name) * " "
-        sys.stderr.write(program_name + ": " + str(e) + "\n")
-        sys.stderr.write(indent + "  for help use --help \n\n")
-        return 2
-
-def file_prepare(reload_data=False):
-    '''
-        download all files and generate data.py
-    '''
-    if not reload_data and os.path.exists("data_.py"):
-        return
-
-    print("download file")
-    #clean data
-    shutil.rmtree("data_.py",ignore_errors=True)
-    shutil.rmtree("data_",ignore_errors=True)
-
-    data_py=open("data_.py",'w')
-    data_py.write("#%s" % "This file is Generated by SINGA, please don't edit\n\n")
-    if config.has_section("data"):
-        file_list = config.items("data")
-        #download files
-        for f in file_list:
-            name,path=download_file(f[0],f[1],data_path)
-            data_py.write("%s=\"%s\"\n" % (name,path))
-
-    data_py.flush()
-    data_py.close()
-
-    if config.has_section("parameter"):
-        parameter_list = config.items("parameter")
-        for p in parameter_list:
-            download_file(p[0],p[1],parameter_path)
-
-def download_file(name,path,dest):
-    '''
-    download one file to dest
-    '''
-    if not os.path.exists(dest):
-        os.makedirs(dest)
-    if (path.startswith('http')):
-        file_name = path.split('/')[-1]
-        target = os.path.join(dest,file_name)
-        urllib.request.urlretrieve(path,target)
-    return name,target
-
-
-def get_parameter(file_name=None):
-    '''
-    get the paticular file name or get the last parameter file
-    '''
-    if not os.path.exists(parameter_path):
-        os.makedirs(parameter_path)
-        return
-
-    if file_name:
-	return os.path.join(parameter_path,file_name)
-
-    parameter_list = [ os.path.join(parameter_path,f) for f in os.listdir(parameter_path)]
-    if len(parameter_list)==0:
-        return
-    parameter_list.sort()
-
-    return parameter_list[-1]
-
-@app.route("/")
-def index():
-    return "Hello SINGA User!"
-
-@app.route('/predict', methods=['POST'])
-def predict():
-    if request.method == 'POST':
-        try:
-            response=service.serve(request)
-        except Exception as e:
-            return e
-        return response
-    return "error, should be post request"

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/image_tool.py
----------------------------------------------------------------------
diff --git a/python/singa/image_tool.py b/python/singa/image_tool.py
index 529f73b..534d3e2 100644
--- a/python/singa/image_tool.py
+++ b/python/singa/image_tool.py
@@ -32,7 +32,6 @@ from __future__ import division
 
 from builtins import range
 from builtins import object
-from past.utils import old_div
 import random
 import numpy as np
 from PIL import Image, ImageEnhance
@@ -73,7 +72,7 @@ def crop(img, patch, position):
     elif position == 'right_bottom':
         left, upper = img.size[0]-patch[0], img.size[1]-patch[1]
     elif position == 'center':
-        left, upper = old_div((img.size[0]-patch[0]),2), old_div((img.size[1]-patch[1]),2)
+        left, upper = (img.size[0]-patch[0]) // 2, (img.size[1]-patch[1]) // 2
     else:
         raise Exception('position is wrong')
 
@@ -96,8 +95,8 @@ def crop_and_resize(img, patch, position):
         left, upper = 0, 0
         right, bottom = size[1], size[1]
     elif position == 'center':
-        left, upper = old_div((size[0]-size[1]),2), 0
-        right, bottom = old_div((size[0]+size[1]),2), size[1]
+        left, upper = (size[0]-size[1]) // 2, 0
+        right, bottom = (size[0]+size[1]) // 2, size[1]
     elif position == 'right':
         left, upper = size[0]-size[1], 0
         right, bottom = size[0], size[1]
@@ -105,8 +104,8 @@ def crop_and_resize(img, patch, position):
         left, upper = 0, 0
         right, bottom = size[0], size[0]
     elif position == 'middle':
-        left, upper = 0, old_div((size[1]-size[0]),2)
-        right, bottom = size[0], old_div((size[1]+size[0]),2)
+        left, upper = 0, (size[1]-size[0]) // 2
+        right, bottom = size[0], (size[1]+size[0]) // 2
     elif position == 'bottom':
         left, upper = 0, size[1]-size[0]
         right, bottom = size[0], size[1]

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/initializer.py
----------------------------------------------------------------------
diff --git a/python/singa/initializer.py b/python/singa/initializer.py
index 5d5f31e..cb2f5a0 100644
--- a/python/singa/initializer.py
+++ b/python/singa/initializer.py
@@ -27,8 +27,6 @@ Example usages::
     initializer.uniform(x, 3, 0)  # use only fan_in
 '''
 from __future__ import division
-
-from past.utils import old_div
 import math
 
 
@@ -91,7 +89,7 @@ def xavier(t):
         t (Tensor): the parater tensor
     '''
 
-    scale = math.sqrt(old_div(6.0, (t.shape[0] + t.shape[1])))
+    scale = math.sqrt(6.0 / (t.shape[0] + t.shape[1]))
     t.uniform(-scale, scale)
 
 
@@ -104,7 +102,7 @@ def glorot(t):
     Args:
         t (Tensor): the parater tensor
     '''
-    scale = math.sqrt(old_div(2.0, (t.shape[0] + t.shape[1])))
+    scale = math.sqrt(2.0 / (t.shape[0] + t.shape[1]))
     t.gaussian(0, 1)
     t *= scale
 
@@ -121,4 +119,4 @@ def msra(t):
     Args:
         t (Tensor): the parater tensor
     '''
-    t.gaussian(0, math.sqrt(old_div(2.0, t.shape[0])))
+    t.gaussian(0, math.sqrt(2.0 / t.shape[0]))

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/python/singa/layer.py b/python/singa/layer.py
index 153768b..8a32279 100644
--- a/python/singa/layer.py
+++ b/python/singa/layer.py
@@ -51,6 +51,7 @@ from builtins import str
 from builtins import range
 from builtins import object
 from builtins import set
+
 from . import singa_wrap
 from .proto import model_pb2
 from . import tensor

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/loss.py
----------------------------------------------------------------------
diff --git a/python/singa/loss.py b/python/singa/loss.py
index 800a113..2c86146 100644
--- a/python/singa/loss.py
+++ b/python/singa/loss.py
@@ -36,14 +36,11 @@ Example usage::
 '''
 from __future__ import division
 from __future__ import absolute_import
-
-
-from past.utils import old_div
 from builtins import object
+
 from . import singa_wrap as singa
-from .proto import model_pb2
 from . import tensor
-import numpy as np
+from .proto import model_pb2
 
 
 class Loss(object):
@@ -160,7 +157,7 @@ class SigmoidCrossEntropy(Loss):
             dx = pi - yi.
         '''
         assert self.truth is not None, 'must call forward in a prior'
-        dx =  self.prob - self.truth
+        dx = self.prob - self.truth
         self.truth = None
         return dx
 
@@ -214,4 +211,4 @@ class SquaredError(Loss):
         Returns:
             a float value as the averaged error
         '''
-        return old_div(tensor.sum(tensor.square(x - y) * 0.5), x.size())
+        return tensor.sum(tensor.square(x - y)) * 0.5 / x.size()

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/metric.py
----------------------------------------------------------------------
diff --git a/python/singa/metric.py b/python/singa/metric.py
index 8495c0c..893b139 100644
--- a/python/singa/metric.py
+++ b/python/singa/metric.py
@@ -37,10 +37,9 @@ Example usage::
 from __future__ import division
 from __future__ import absolute_import
 
-
 from builtins import range
-from past.utils import old_div
 from builtins import object
+
 from . import singa_wrap as singa
 from . import tensor
 import numpy as np
@@ -92,8 +91,6 @@ class Accuracy(Metric):
         self.swig_metric = singa.Accuracy()
 
 
-
-
 class Precision(Metric):
     '''Make the top-k labels of max probability as the prediction
 
@@ -102,8 +99,6 @@ class Precision(Metric):
     def __init__(self, top_k):
         self.top_k = top_k
 
-
-
     def forward(self, x, y):
         '''Compute the precision for each sample.
 
@@ -124,17 +119,17 @@ class Precision(Metric):
         x_np = tensor.to_numpy(x)
         y_np = tensor.to_numpy(y)
 
-        pred_np = np.argsort(-x_np)[:, 0:self.top_k] #Sort in descending order
+        pred_np = np.argsort(-x_np)[:, 0:self.top_k]  # Sort in descending order
 
         prcs_np = np.zeros(pred_np.shape[0], dtype=np.float32)
 
         for i in range(pred_np.shape[0]):
-            #groundtruth labels
+            # groundtruth labels
             label_np = np.argwhere(y_np[i])
 
-            #Num of common labels among prediction and groundtruth
+            # num of common labels among prediction and groundtruth
             num_intersect = np.intersect1d(pred_np[i], label_np).size
-            prcs_np[i] = old_div(num_intersect, float(self.top_k))
+            prcs_np[i] = num_intersect / float(self.top_k)
 
         precision = tensor.from_numpy(prcs_np)
 
@@ -144,7 +139,6 @@ class Precision(Metric):
 
         return precision
 
-
     def evaluate(self, x, y):
         '''Compute the averaged precision over all samples.
 
@@ -166,7 +160,6 @@ class Recall(Metric):
     def __init__(self, top_k):
         self.top_k = top_k
 
-
     def forward(self, x, y):
         '''Compute the recall for each sample.
 
@@ -187,17 +180,17 @@ class Recall(Metric):
         x_np = tensor.to_numpy(x)
         y_np = tensor.to_numpy(y)
 
-        pred_np = np.argsort(-x_np)[:, 0:self.top_k] #Sort in descending order
+        pred_np = np.argsort(-x_np)[:, 0:self.top_k]  # Sort in descending order
 
         recall_np = np.zeros(pred_np.shape[0], dtype=np.float32)
 
         for i in range(pred_np.shape[0]):
-            #Return the index of non-zero dimension of i-th sample
+            # Return the index of non-zero dimension of i-th sample
             label_np = np.argwhere(y_np[i])
 
-            #Num of common labels among prediction and groundtruth
+            # Num of common labels among prediction and groundtruth
             num_intersect = np.intersect1d(pred_np[i], label_np).size
-            recall_np[i] = old_div(float(num_intersect), label_np.size)
+            recall_np[i] = float(num_intersect) / label_np.size
 
         recall = tensor.from_numpy(recall_np)
 
@@ -207,7 +200,6 @@ class Recall(Metric):
 
         return recall
 
-
     def evaluate(self, x, y):
         '''Compute the averaged precision over all samples.
 
@@ -218,4 +210,4 @@ class Recall(Metric):
             a float value for the averaged metric
         '''
 
-        return tensor.average(self.forward(x,y))
+        return tensor.average(self.forward(x, y))

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/net.py
----------------------------------------------------------------------
diff --git a/python/singa/net.py b/python/singa/net.py
index a53fc68..501b8bc 100644
--- a/python/singa/net.py
+++ b/python/singa/net.py
@@ -56,19 +56,22 @@ Example usages::
 from __future__ import print_function
 from __future__ import absolute_import
 
-from future import standard_library
-standard_library.install_aliases()
 from builtins import zip
 from builtins import str
 from builtins import object
+import os
+
 from .proto.model_pb2 import kTrain, kEval
 from .__init__ import __version__
 from . import tensor
 from . import layer
 from . import snapshot
-import pickle as pickle
 
-import os
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
+
 
 '''For display training information, e.g L1 value of layer data'''
 verbose = False

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/optimizer.py
----------------------------------------------------------------------
diff --git a/python/singa/optimizer.py b/python/singa/optimizer.py
index 54b81e8..a86c537 100644
--- a/python/singa/optimizer.py
+++ b/python/singa/optimizer.py
@@ -34,13 +34,14 @@ Example usage::
 from __future__ import division
 from __future__ import absolute_import
 
-from past.utils import old_div
 from builtins import object
 import math
+
 from . import singa_wrap as singa
 from . import tensor
 from .proto import model_pb2
 
+
 class Optimizer(object):
     '''The base python optimizer class.
 
@@ -213,7 +214,8 @@ class SGD(Optimizer):
         grad = self.apply_regularizer_constraint(epoch, value, grad, name, step)
         if name is not None and name in self.learning_rate_multiplier:
             lr = lr * self.learning_rate_multiplier[name]
-        self.opt.Apply(epoch, lr, name.encode(), grad.singa_tensor, value.singa_tensor)
+        self.opt.Apply(epoch, lr, name.encode(), grad.singa_tensor,
+                       value.singa_tensor)
         return value
 
 
@@ -241,7 +243,8 @@ class Nesterov(Optimizer):
         grad = self.apply_regularizer_constraint(epoch, value, grad, name, step)
         if name is not None and name in self.learning_rate_multiplier:
             lr = lr * self.learning_rate_multiplier[name]
-        self.opt.Apply(epoch, lr, name.encode(), grad.singa_tensor, value.singa_tensor)
+        self.opt.Apply(epoch, lr, name.encode(), grad.singa_tensor,
+                       value.singa_tensor)
         return value
 
 
@@ -272,7 +275,8 @@ class RMSProp(Optimizer):
         grad = self.apply_regularizer_constraint(epoch, value, grad, name, step)
         if name is not None and name in self.learning_rate_multiplier:
             lr = lr * self.learning_rate_multiplier[name]
-        self.opt.Apply(step, lr,  name.encode(), grad.singa_tensor, value.singa_tensor)
+        self.opt.Apply(step, lr,  name.encode(), grad.singa_tensor,
+                       value.singa_tensor)
         return value
 
 
@@ -302,7 +306,8 @@ class AdaGrad(Optimizer):
         grad = self.apply_regularizer_constraint(epoch, value, grad, name, step)
         if name is not None and name in self.learning_rate_multiplier:
             lr = lr * self.learning_rate_multiplier[name]
-        self.opt.Apply(epoch, lr,  name.encode(), grad.singa_tensor, value.singa_tensor)
+        self.opt.Apply(epoch, lr,  name.encode(), grad.singa_tensor,
+                       value.singa_tensor)
         return value
 
 
@@ -437,5 +442,5 @@ class L2Constraint(Constraint):
 
     def apply(self, epoch, value, grad, step=-1):
         nrm = grad.l2()
-        grad *= old_div(self.threshold, nrm)
+        grad *= self.threshold / nrm
         return grad

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/python/singa/tensor.py
----------------------------------------------------------------------
diff --git a/python/singa/tensor.py b/python/singa/tensor.py
index 144ed61..10ff01b 100644
--- a/python/singa/tensor.py
+++ b/python/singa/tensor.py
@@ -57,10 +57,10 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import absolute_import
 
-from past.utils import old_div
 from builtins import object
 import numpy as np
 from functools import reduce
+
 from .proto import core_pb2
 from . import singa_wrap as singa
 from . import device as pydevice
@@ -68,6 +68,7 @@ from . import device as pydevice
 int32 = core_pb2.kInt
 float32 = core_pb2.kFloat32
 
+
 class Tensor(object):
     '''Create a Py Tensor, which wraps a swig converted Tensor from CPP Tensor
 
@@ -766,7 +767,7 @@ def average(t, axis=None):
     if t.ndim() > 1:
         return _call_singa_func(singa.Average, t.singa_tensor, axis)
     else:
-        return old_div(singa.SumAsFloat(t.singa_tensor), t.size())
+        return singa.SumAsFloat(t.singa_tensor) / t.size()
 
 
 def softmax(t, out=None):

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/test/python/test_loss.py
----------------------------------------------------------------------
diff --git a/test/python/test_loss.py b/test/python/test_loss.py
index eb06b81..31784ce 100644
--- a/test/python/test_loss.py
+++ b/test/python/test_loss.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from past.utils import old_div
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -17,9 +15,9 @@ from past.utils import old_div
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from __future__ import division
 
 import unittest
-
 import numpy as np
 
 from singa import loss
@@ -47,7 +45,7 @@ class TestLoss(unittest.TestCase):
         sig.backward()
         l2 = sig.evaluate(True, self.x, self.y)
 
-        p = old_div(1.0, (1 + np.exp(-self.x_np)))
+        p = 1.0 / (1 + np.exp(-self.x_np))
         l = - (self.y_np * np.log(p) + (1-self.y_np) * np.log(1-p))
         self.assertAlmostEqual(l1.l1(), l2)
         self.assertAlmostEqual(l1.l1(), np.average(l))

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/test/python/test_metric.py
----------------------------------------------------------------------
diff --git a/test/python/test_metric.py b/test/python/test_metric.py
index 8a22372..80e2b7b 100644
--- a/test/python/test_metric.py
+++ b/test/python/test_metric.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from past.utils import old_div
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -17,9 +15,9 @@ from past.utils import old_div
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from __future__ import division
 
 import unittest
-
 import numpy as np
 
 from singa import metric
@@ -30,57 +28,55 @@ class TestPrecision(unittest.TestCase):
     def setUp(self):
         x_np = np.asarray([[0.7, 0.2, 0.1],
                            [0.2, 0.4, 0.5],
-                           [0.2,0.4,0.4]],
+                           [0.2, 0.4, 0.4]],
                           dtype=np.float32)
 
         y_np = np.asarray([[1, 0, 1],
                            [0, 1, 1],
                            [1, 0, 0]],
-                           dtype=np.int32)
+                          dtype=np.int32)
 
         self.prcs = metric.Precision(top_k=2)
         self.x = tensor.from_numpy(x_np)
         self.y = tensor.from_numpy(y_np)
 
-
     def test_forward(self):
-        p = self.prcs.forward(self.x,self.y)
+        p = self.prcs.forward(self.x, self.y)
         self.assertAlmostEqual(tensor.to_numpy(p)[0], 0.5)
         self.assertAlmostEqual(tensor.to_numpy(p)[1], 1)
         self.assertAlmostEqual(tensor.to_numpy(p)[2], 0)
 
-
     def test_evaluate(self):
-        e = self.prcs.evaluate(self.x,self.y)
-        self.assertAlmostEqual(e, old_div((0.5 + 1 + 0), 3))
+        e = self.prcs.evaluate(self.x, self.y)
+        self.assertAlmostEqual(e, (0.5 + 1 + 0) / 3)
+
 
 class TestRecall(unittest.TestCase):
     def setUp(self):
         x_np = np.asarray([[0.7, 0.2, 0.1],
                            [0.2, 0.4, 0.5],
-                           [0.2,0.4,0.4]],
+                           [0.2, 0.4, 0.4]],
                           dtype=np.float32)
 
         y_np = np.asarray([[1, 0, 1],
                            [1, 1, 1],
                            [1, 0, 0]],
-                           dtype=np.int32)
+                          dtype=np.int32)
 
         self.recall = metric.Recall(top_k=2)
         self.x = tensor.from_numpy(x_np)
         self.y = tensor.from_numpy(y_np)
 
-
     def test_forward(self):
-        r = self.recall.forward(self.x,self.y)
+        r = self.recall.forward(self.x, self.y)
         self.assertAlmostEqual(tensor.to_numpy(r)[0], 0.5)
-        self.assertAlmostEqual(tensor.to_numpy(r)[1], old_div(2.0, 3))
+        self.assertAlmostEqual(tensor.to_numpy(r)[1], 2.0 / 3)
         self.assertAlmostEqual(tensor.to_numpy(r)[2], 0)
 
-
     def test_evaluate(self):
-        e = self.recall.evaluate(self.x,self.y)
-        self.assertAlmostEqual(e, old_div((0.5 + old_div(2.0, 3) + 0), 3))
+        e = self.recall.evaluate(self.x, self.y)
+        self.assertAlmostEqual(e, (0.5 + (2.0 / 3) + 0) / 3)
+
 
 if __name__ == '__main__':
     unittest.main()

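Precision(top_k=2) asks, per row, how many of the two highest-scoring classes are labelled positive, divided by two; Recall(top_k=2) divides the same hit count by the number of positives in the row. A numpy sketch that reproduces the numbers asserted in TestPrecision above (illustrative, not the singa.metric implementation):

    import numpy as np

    def topk_precision_recall(scores, labels, k=2):
        # Per-row precision@k and recall@k for multi-label targets
        # (illustrative; not the singa.metric implementation).
        topk = np.argsort(-scores, axis=1)[:, :k]    # indices of the k largest scores
        hits = np.array([labels[i, topk[i]].sum() for i in range(len(scores))],
                        dtype=np.float64)
        precision = hits / k                         # fraction of the top k that are positive
        recall = hits / labels.sum(axis=1)           # fraction of positives found in the top k
        return precision, recall

    x = np.array([[0.7, 0.2, 0.1],
                  [0.2, 0.4, 0.5],
                  [0.2, 0.4, 0.4]], dtype=np.float32)
    y = np.array([[1, 0, 1],
                  [0, 1, 1],
                  [1, 0, 0]], dtype=np.int32)
    p, r = topk_precision_recall(x, y)
    print(p, p.mean())    # [0.5 1. 0.], mean 0.5 -- matches TestPrecision
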
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/test/python/test_net.py
----------------------------------------------------------------------
diff --git a/test/python/test_net.py b/test/python/test_net.py
index afabc0d..8522f15 100644
--- a/test/python/test_net.py
+++ b/test/python/test_net.py
@@ -1,6 +1,3 @@
-from __future__ import division
-from builtins import zip
-from past.utils import old_div
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -18,6 +15,8 @@ from past.utils import old_div
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from __future__ import division
+from builtins import zip
 
 import unittest
 import math
@@ -44,7 +43,7 @@ class TestFeedForwardNet(unittest.TestCase):
         y.set_value(0)
         out, _ = ffn.evaluate(x, y)
         self.assertAlmostEqual(out * 3,
-                               - math.log(old_div(1.0,(1+math.exp(1)))) -
+                               - math.log(1.0 / (1+math.exp(1))) -
                                math.log(0.5) - math.log(0.5),
                                5)
 

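The expected value on the right-hand side of the assertion is a hand-written sum of three cross-entropy terms; evaluating it in plain Python (independent of the network the test constructs):

    import math

    # Right-hand side of the assertion: 1/(1+e) is sigmoid(-1), so the first
    # term is -log(sigmoid(-1)) = softplus(1); the other two are -log(0.5).
    expected = (-math.log(1.0 / (1 + math.exp(1)))
                - math.log(0.5) - math.log(0.5))
    print(expected)   # ~2.6996; the test compares out * 3 to this, to 5 places
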
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/test/python/test_optimizer.py
----------------------------------------------------------------------
diff --git a/test/python/test_optimizer.py b/test/python/test_optimizer.py
index 11374f5..f5c5471 100644
--- a/test/python/test_optimizer.py
+++ b/test/python/test_optimizer.py
@@ -1,7 +1,3 @@
-from __future__ import division
-from builtins import zip
-from builtins import range
-from past.utils import old_div
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -19,6 +15,10 @@ from past.utils import old_div
 # specific language governing permissions and limitations
 # under the License.
 # =============================================================================
+from __future__ import division
+from builtins import zip
+from builtins import range
+
 import unittest
 import math
 import numpy as np
@@ -35,7 +35,7 @@ if singa_wrap.USE_CUDA:
 
 def np_adam(plist, glist, mlist, vlist, lr, t, b1=0.9, b2=0.999):
     for p, g, m, v in zip(plist, glist, mlist, vlist):
-        m *=b1
+        m *= b1
         m += (1-b1) * g
         v *= b2
         v += (1-b2) * g * g
@@ -110,7 +110,7 @@ class TestOptimizer(unittest.TestCase):
         cons = opt.L2Constraint(threshold)
         cons.apply(0, self.W, self.g)
         g = tensor.to_numpy(self.g)
-        nrm = old_div(np.linalg.norm(self.np_g), self.np_g.size)
+        nrm = np.linalg.norm(self.np_g) / self.np_g.size
         for i in range(g.size):
             self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)
 
@@ -122,7 +122,7 @@ class TestOptimizer(unittest.TestCase):
         cons.apply(0, self.W, self.g)
         self.g.to_host()
         g = tensor.to_numpy(self.g)
-        nrm = old_div(np.linalg.norm(self.np_g), self.np_g.size)
+        nrm = np.linalg.norm(self.np_g) / self.np_g.size
         for i in range(g.size):
             self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)
 

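The np_adam helper only shows the moment updates in this hunk. For reference, a full textbook Adam step (Kingma & Ba) on numpy arrays looks like the sketch below; the bias correction and parameter step follow the standard formulation and may differ in detail from what the helper does beyond the shown lines:

    import numpy as np

    def adam_step(p, g, m, v, lr, t, b1=0.9, b2=0.999, eps=1e-8):
        # Textbook Adam update on numpy arrays (illustrative sketch).
        m = b1 * m + (1 - b1) * g            # first-moment estimate
        v = b2 * v + (1 - b2) * g * g        # second-moment estimate
        alpha = lr * np.sqrt(1 - b2 ** t) / (1 - b1 ** t)   # bias correction
        p = p - alpha * m / (np.sqrt(v) + eps)
        return p, m, v

    p, m, v = np.ones(3), np.zeros(3), np.zeros(3)
    g = np.array([0.1, -0.2, 0.3])
    p, m, v = adam_step(p, g, m, v, lr=0.01, t=1)
    print(p)
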
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/test/python/test_tensor.py
----------------------------------------------------------------------
diff --git a/test/python/test_tensor.py b/test/python/test_tensor.py
index 3f86899..dc3ff13 100644
--- a/test/python/test_tensor.py
+++ b/test/python/test_tensor.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from past.utils import old_div
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -17,6 +15,7 @@ from past.utils import old_div
 # specific language governing permissions and limitations
 # under the License.
 # =============================================================================
+from __future__ import division
 
 import math
 import unittest
@@ -152,7 +151,7 @@ class TestTensorMethods(unittest.TestCase):
     def test_rdiv(self):
         x = tensor.Tensor((3,))
         x.set_value(1)
-        y = old_div(2, x)
+        y = 2 / x
         self.assertEqual(tensor.average(y), 2.)
 
     def test_numpy_convert(self):

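With true division, `2 / x` dispatches to the tensor's reflected division hook: `__rtruediv__` on Python 3 (and on Python 2 under the `division` future import), `__rdiv__` on plain Python 2. A toy wrapper, unrelated to SINGA's actual Tensor class, showing how both can be supported:

    from __future__ import division

    class Scalarish(object):
        # Toy wrapper (unrelated to singa.tensor.Tensor) showing reflected
        # true division so that `2 / x` works on both interpreters.
        def __init__(self, v):
            self.v = float(v)

        def __rtruediv__(self, lhs):     # `lhs / self` on py3, and on py2 with the future import
            return Scalarish(lhs / self.v)

        __rdiv__ = __rtruediv__          # fallback for plain Python 2 division

    x = Scalarish(1.0)
    y = 2 / x
    print(y.v)    # 2.0
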
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/01b7a74b/tool/opencl/clsrc_to_str.py
----------------------------------------------------------------------
diff --git a/tool/opencl/clsrc_to_str.py b/tool/opencl/clsrc_to_str.py
index 8ca94a0..9faea7d 100755
--- a/tool/opencl/clsrc_to_str.py
+++ b/tool/opencl/clsrc_to_str.py
@@ -22,19 +22,21 @@
 This file is executed only if .cl files are updated.
 It is executed in the ROOT folder of SINGA source repo.
 '''
-
+from future.utils import iteritems
 
 distribution = "./src/core/tensor/distribution.cl"
 tensormath = "./src/core/tensor/tensor_math_opencl.cl"
 im2col = "./src/model/layer/im2col.cl"
 pooling = "./src/model/layer/pooling.cl"
+files = {"distribution_str": distribution, "tensormath_str": tensormath,
+         "im2col_str": im2col, "pooling_str": pooling}
 
-files = {"distribution_str" : distribution, "tensormath_str" : tensormath, "im2col_str" : im2col, "pooling_str" : pooling}
 
 if __name__ == "__main__":
     fullpath = './src/core/device/opencl_func.h'
     with open(fullpath, 'w') as fout:
-        fout.write("// This file is auto-generated by tool/opencl/clsrc_to_str, do not edit manually.\n")
+        fout.write("// This file is auto-generated by tool/opencl/clsrc_to_str."
+                   " do not edit manually.\n")
         license = """
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -57,7 +59,7 @@ if __name__ == "__main__":
         fout.write(license)
         fout.write("#include <string>\n\n")
         fout.write("namespace singa {\n namespace opencl {\n")
-        for name, path in list(files.items()):
+        for name, path in iteritems(files):
             with open(path, 'r') as fin:
                 src = fin.read()
                 src = repr(src)

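`future.utils.iteritems` yields key/value pairs lazily on Python 2 (via `dict.iteritems()`) and wraps `dict.items()` on Python 3, so the loop no longer materializes the intermediate list that `list(files.items())` built. A tiny standalone illustration (requires the `future` package; the dict here is made up, not the tool's file map):

    from future.utils import iteritems   # from the `future` compatibility package

    files = {"a_str": "a.cl", "b_str": "b.cl"}   # hypothetical mapping, not the tool's
    for name, path in iteritems(files):          # dict.iteritems() on py2, dict.items() on py3
        print(name, path)
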
