tvm-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] [incubator-tvm] adobay commented on a change in pull request #5289: add tensorflow cumsum
Date Tue, 21 Apr 2020 14:51:15 GMT

adobay commented on a change in pull request #5289:
URL: https://github.com/apache/incubator-tvm/pull/5289#discussion_r412253968



##########
File path: topi/include/topi/transform.h
##########
@@ -1105,6 +1105,88 @@ inline tvm::te::Tensor matmul(const tvm::te::Tensor& A,
   return tvm::te::compute(output_shape, l, name, tag);
 }
 
+/**
+ * Compute the cumulative sum of the tensor `A` along `axis`.
+ *
+ *  By default, this operation performs an inclusive cumsum, which means that the first
+ *  element of the input is identical to the first element of the output:
+ *
+ *  ```python
+ *  cumsum([a, b, c])  # [a, a + b, a + b + c]
+ *  ```
+ *
+ *  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
+ *  instead:
+ *
+ *  ```python
+ *  cumsum([a, b, c], exclusive=True)  # [0, a, a + b]
+ *  ```
+ *
+ *  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+ *  opposite direction:
+ *
+ *  ```python
+ *  cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
+ *  ```
+ *
+ *  The `reverse` and `exclusive` kwargs can also be combined:
+ *
+ *  ```python
+ *  cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
+ *  ```
+ *
+ * @param A             Input tensor
+ * @param axis          Must be in the range `[-rank(x), rank(x))`
+ * @param exclusive     Perform exclusive cumsum
+ * @param reverse       Performed in the opposite direction
+ * @param name          The name of the operation
+ * @param tag           The tag to mark the operation
+ * @return              A Tensor whose op member is the cumsum operation
+ */
+inline tvm::te::Tensor cumsum(const tvm::te::Tensor& A,
+                              int axis,
+                              bool exclusive = false,
+                              bool reverse = false,
+                              std::string name = "T_cumsum",
+                              std::string tag = kCumsum) {
+    int totalSize = static_cast<int>(A->shape.size());
+    if (axis < 0) {
+        axis = totalSize + axis;
+    }
+    auto maxLength = A->shape[axis];
+    auto l = [&](const Array<Var>& input_indices) {

Review comment:
       @tqchen 
   I implemented cumsum using scan and submitted it, but I got an error when running the test case.
Could you please help me with this error: ```AttributeError: <class 'tvm.te.tensor.ScanOp'>
has no attribute axis```?
   
   ```
   Traceback (most recent call last):
   
     File "tests/python/frontend/tensorflow/test_forward.py", line 3359, in <module>
       test_forward_cumsum()
   
     File "tests/python/frontend/tensorflow/test_forward.py", line 1373, in test_forward_cumsum
       _cumsum((4, ), 0, exclusive, reverse)
   
     File "tests/python/frontend/tensorflow/test_forward.py", line 1367, in _cumsum
       compare_tf_with_tvm([np_data], ['in_data:0'], 'cumsum:0')
   
     File "tests/python/frontend/tensorflow/test_forward.py", line 192, in compare_tf_with_tvm
       cuda_layout=cuda_layout)
   
     File "tests/python/frontend/tensorflow/test_forward.py", line 127, in run_tvm_graph
       graph, lib, params = relay.build(mod, target, target_host, params)
   
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/relay/build_module.py", line 251,
in build
       graph_json, mod, params = bld_mod.build(mod, target, target_host, params)
   
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/relay/build_module.py", line 120,
in build
       self._build(mod, target, target_host)
   
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line
216, in __call__
       raise get_last_ffi_error()
   
   AttributeError: Traceback (most recent call last):
     [bt] (8) 9   libtvm.dylib                        0x0000000128d92dc8 tvm::relay::ExprFunctor<std::__1::vector<tvm::relay::backend::GraphNodeRef,
std::__1::allocator<tvm::relay::backend::GraphNodeRef> > (tvm::RelayExpr const&)>::InitVTable()::'lambda4'(tvm::runtime::ObjectRef
const&, tvm::relay::ExprFunctor<std::__1::vector<tvm::relay::backend::GraphNodeRef,
std::__1::allocator<tvm::relay::backend::GraphNodeRef> > (tvm::RelayExpr const&)>*)::__invoke(tvm::runtime::ObjectRef
const&, tvm::relay::ExprFunctor<std::__1::vector<tvm::relay::backend::GraphNodeRef,
std::__1::allocator<tvm::relay::backend::GraphNodeRef> > (tvm::RelayExpr const&)>*)
+ 24
     [bt] (7) 8   libtvm.dylib                        0x0000000128d90048 tvm::relay::backend::GraphRuntimeCodegen::VisitExpr_(tvm::relay::CallNode
const*) + 3752
     [bt] (6) 7   libtvm.dylib                        0x0000000128d84d74 std::__1::__function::__func<void
tvm::runtime::TypedPackedFunc<tvm::relay::CachedFunc (tvm::relay::CompileEngine, tvm::relay::CCacheKey)>::AssignTypedLambda<tvm::relay::$_8>(tvm::relay::$_8)::'lambda'(tvm::runtime::TVMArgs
const&, tvm::runtime::TVMRetValue*), std::__1::allocator<void tvm::runtime::TypedPackedFunc<tvm::relay::CachedFunc
(tvm::relay::CompileEngine, tvm::relay::CCacheKey)>::AssignTypedLambda<tvm::relay::$_8>(tvm::relay::$_8)::'lambda'(tvm::runtime::TVMArgs
const&, tvm::runtime::TVMRetValue*)>, void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)>::operator()(tvm::runtime::TVMArgs&&,
tvm::runtime::TVMRetValue*&&) + 356
     [bt] (5) 6   libtvm.dylib                        0x0000000128d70d62 tvm::relay::CompileEngineImpl::Lower(tvm::relay::CCacheKey
const&) + 18
     [bt] (4) 5   libtvm.dylib                        0x0000000128d73016 tvm::relay::CompileEngineImpl::LowerInternal(tvm::relay::CCacheKey
const&) + 1382
     [bt] (3) 4   libtvm.dylib                        0x0000000128d743d6 tvm::relay::CompileEngineImpl::CreateSchedule(tvm::relay::Function
const&, tvm::Target const&) + 70
     [bt] (2) 3   libtvm.dylib                        0x0000000128d75c4f tvm::relay::ScheduleGetter::Create(tvm::relay::Function
const&) + 3151
     [bt] (1) 2   libtvm.dylib                        0x0000000128df80a7 tvm::relay::OpImplementation::Schedule(tvm::Attrs
const&, tvm::Array<tvm::te::Tensor, void> const&, tvm::Target const&) +
167
     [bt] (0) 1   libtvm.dylib                        0x0000000128eb86c5 std::__1::__function::__func<TVMFuncCreateFromCFunc::$_2,
std::__1::allocator<TVMFuncCreateFromCFunc::$_2>, void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)>::operator()(tvm::runtime::TVMArgs&&,
tvm::runtime::TVMRetValue*&&) + 213
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/runtime/object.py", line 54, in
__getattr__
       return _ffi_node_api.NodeGetAttr(self, name)
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line
216, in __call__
       raise get_last_ffi_error()
     [bt] (6) 7   Python                              0x00007fff5bff9990 Python + 140730441898384
     [bt] (5) 6   _ctypes.cpython-36m-darwin.so       0x000000010482a237 ffi_call_unix64 +
79
     [bt] (4) 5   libtvm.dylib                        0x0000000128eb5f08 TVMFuncCall + 72
     [bt] (3) 4   libtvm.dylib                        0x000000012862497b std::__1::__function::__func<void
(*)(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), std::__1::allocator<void (*)(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)>, void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)>::operator()(tvm::runtime::TVMArgs&&,
tvm::runtime::TVMRetValue*&&) + 59
     [bt] (2) 3   libtvm.dylib                        0x000000012862158a tvm::NodeGetAttr(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*) + 378
     [bt] (1) 2   libtvm.dylib                        0x000000012861ff5a tvm::ReflectionVTable::GetAttr(tvm::runtime::Object*,
std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>
> const&) const + 682
     [bt] (0) 1   libtvm.dylib                        0x00000001284c4ea9 dmlc::LogMessageFatal::~LogMessageFatal()
+ 57
     File "/Users/adobay/Documents/code/tvm/tvm/src/node/reflection.cc", line 113
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line
78, in cfun
       rv = local_pyfunc(*pyargs)
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/relay/op/strategy/x86.py", line
32, in schedule_injective_cpu
       return topi.x86.schedule_injective(outs)
     File "/Users/adobay/Documents/code/tvm/tvm/topi/python/topi/x86/injective.py", line 73,
in schedule_injective
       schedule_injective_from_existing(s, x)
     File "/Users/adobay/Documents/code/tvm/tvm/topi/python/topi/x86/injective.py", line 37,
in schedule_injective_from_existing
       if len(sch[out].op.axis) >= 5:
     File "/Users/adobay/Documents/code/tvm/tvm/python/tvm/runtime/object.py", line 57, in
__getattr__
       "%s has no attribute %s" % (str(type(self)), name))
   AttributeError: ScanOp object has no attribute axis
   During handling of the above exception, another exception occurred:
   
   AttributeError: <class 'tvm.te.tensor.ScanOp'> has no attribute axis
   ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



Mime
View raw message