tvm-commits mailing list archives

From GitBox <...@apache.org>
Subject [GitHub] [incubator-tvm] jroesch commented on a change in pull request #5039: [Relay] GradientCell Relay Pass
Date Mon, 23 Mar 2020 23:38:31 GMT
jroesch commented on a change in pull request #5039: [Relay] GradientCell Relay Pass
URL: https://github.com/apache/incubator-tvm/pull/5039#discussion_r396823122
 
 

 ##########
 File path: src/relay/transforms/gradient_cell.cc
 ##########
 @@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ *
+ * \file gradient_cell.cc
+ *
+ * \brief Convert all tensors to a Gradient Cell
+ * 
+ * This pass delays or removes memory allocation by converting tensors into 
+ * GradCell, an algebraic data type defined in gradient.rly
+ * 
+ * All calls to ones, ones_like, zeros, and zeros_like call the One or Zero
+ * constructor of GradCell, which does not instantiate the tensor in memory until
+ * it is needed. All other cases use the Raw constructor, which means the tensor
+ * is instantiated in memory as before.
+ * 
+ * It also overloads the + and * operations, which can improve performance for
+ * operations involving tensors whose values are all 0 or all 1.
+ * 
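+ * As a rough sketch of the idea (illustrative pseudocode, not the exact
+ * definitions in gradient.rly), the overloaded add can dispatch on the
+ * GradCell constructors:
+ *
+ *   add(Zero, x)       => x                    // adding zero is the identity
+ *   add(x, Zero)       => x
+ *   add(Raw a, Raw b)  => Raw(add(a, b))       // fall back to a real tensor add
+ *   (other cases materialize their operands only when actually needed)
+ * 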
+ * Note: this pass can only be used with functions where the input/output types are
+ * a combination of TupleTypes and TensorTypes
+ * 
+ * This pass optimizes 6 ops:
+ * - add
+ * - multiply
+ * - ones
+ * - ones_like
+ * - zeros
+ * - zeros_like
+ * 
+ * This pass makes use of three visitors: the main one mutates the entire function,
+ * one wraps the inputs, and one unwraps the outputs.
+ * 
+ * For example:
+ * fn: TensorType[(10,10), float32] -> TensorType[(10,10), float32]
+ * 
+ * After this pass
+ * fn: GradCell[TensorType[(10,10), float32]] -> GradCell[TensorType[(10,10), float32]]
+ * 
+ * Thus, it is necessary to wrap this outer function so that the input/output types
+ * remain the same.
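+ *
+ * Schematically (an illustrative sketch, not the exact generated code;
+ * transformed_fn is a placeholder name), the wrapper looks like:
+ *   fn (%x: TensorType[(10,10), float32]) -> TensorType[(10,10), float32] {
+ *     FromGradCell(transformed_fn(Raw(%x)))
+ *   }
+ * where Raw and FromGradCell come from gradient.rly.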
+ */
+
+#include <tvm/relay/analysis.h>
+#include <tvm/relay/expr_functor.h>
+#include <tvm/ir/type_functor.h>
+#include <tvm/relay/transform.h>
+#include "let_list.h"
+
+namespace tvm {
+namespace relay {
+
+/*!
+* \brief Visitor to wrap inputs
+*/
+class InputVisitor: public ExprFunctor<Expr(const Expr&, const Type&)> {
+ public:
+  explicit InputVisitor(IRModule module): module_(module) {}
+
+  Expr wrapExpr(const Expr& expr, const Type& type) {
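+    // Tensor-typed expressions are wrapped in the Raw constructor of GradCell;
+    // tuple types are handled field by field, recursing through the visitor so
+    // each element is wrapped. Any other type is left unchanged.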
+    if (type.as<TensorTypeNode>()) {
+      return Call(module_->GetConstructor("GradCell", "Raw"),
+                          {expr}, Attrs(), {type});
+    } else if (auto* type_anno = type.as<TupleTypeNode>()) {
+      tvm::Array<Expr> fields;
+      for (size_t i = 0; i < type_anno->fields.size(); i++) {
+        const Type& t = type_anno->fields[i];
+        fields.push_back(this->VisitExpr(TupleGetItem(expr, i), t));
+      }
+      Expr tuple = Tuple(fields);
+      return tuple;
+    }
+
+    return expr;
+  }
+
+  Expr VisitExpr_(const VarNode* op, const Type& t) final {
+    return wrapExpr(GetRef<Var>(op), op->type_annotation);
+  }
+
+  Expr VisitExpr_(const TupleGetItemNode* op, const Type& t) final {
+    return wrapExpr(GetRef<TupleGetItem>(op), t);
+  }
+ private:
+  IRModule module_;
+};
+
+/*!
+* \brief Visitor to unwrap output
+*/
+class OutputVisitor: public ExprFunctor<Expr(const Expr&, const Type&)> {
+ public:
+  explicit OutputVisitor(IRModule module): module_(module) {}
+
+  Expr unwrapExpr(const Expr& expr, const Type& type) {
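+    // Expressions whose type is the GradCell ADT are converted back to plain
+    // tensors via FromGradCell; tuple types are handled field by field.
+    // Any other type is left unchanged.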
+    if (auto* type_call = type.as<TypeCallNode>()) {
+      if (type_call->func.same_as(module_->GetGlobalTypeVar("GradCell"))) {
+        return Call(module_->GetGlobalVar("FromGradCell"), {expr});
+      }
+      return expr;
+    } else if (auto* type_anno = type.as<TupleTypeNode>()) {
+      tvm::Array<Expr> fields;
+      for (size_t i = 0; i < type_anno->fields.size(); i++) {
+        const Type& t = type_anno->fields[i];
+        fields.push_back(this->VisitExpr(TupleGetItem(expr, i), t));
+      }
+      Expr tuple = Tuple(fields);
+      return tuple;
+    }
+
+    return expr;
+  }
+
+  Expr VisitExpr_(const CallNode* op, const Type& t) final {
+    return unwrapExpr(GetRef<Call>(op), t);
+  }
+
+  Expr VisitExpr_(const TupleGetItemNode* op, const Type& t) final {
+    return unwrapExpr(GetRef<TupleGetItem>(op), t);
+  }
+ private:
+  IRModule module_;
+};
+
+class GradientCellTransform: public ExprMutator, public TypeMutator {
+ public:
+  explicit GradientCellTransform(IRModule module):
+    module_(module) {
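+      // gradient.rly defines the GradCell ADT (Raw/One/Zero) and FromGradCell,
+      // which this pass depends on.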
+      module_->ImportFromStd("gradient.rly");
+    }
+
+  /*!
+  * \brief apply the GradientCell transformation and wrap the function
+  * so that the function type stays the same
+  * 
+  * input/output types should only be a combination of TupleTypes and TensorTypes
+  */
+  Expr transform(const Expr& e) {
 
 Review comment:
   this does not match the C++ style guide

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services
