tvm-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tqc...@apache.org
Subject [incubator-tvm] branch master updated: Remove deprecated opengl files (#5711)
Date Tue, 02 Jun 2020 00:53:45 GMT
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
     new f272e06  Remove deprecated opengl files (#5711)
f272e06 is described below

commit f272e06095dd402b7b5e77e4923573acf35be3ee
Author: Tianqi Chen <tqchen@users.noreply.github.com>
AuthorDate: Mon Jun 1 17:53:33 2020 -0700

    Remove deprecated opengl files (#5711)
---
 python/tvm/__init__.py                   |   2 +-
 python/tvm/_ffi/runtime_ctypes.py        |   2 -
 python/tvm/relay/op/strategy/__init__.py |   1 -
 python/tvm/relay/op/strategy/opengl.py   |  83 ---------------------
 python/tvm/rpc/client.py                 |   4 -
 python/tvm/runtime/__init__.py           |   2 +-
 python/tvm/runtime/ndarray.py            |  16 ----
 python/tvm/target/__init__.py            |   2 +-
 python/tvm/target/target.py              |  12 ---
 python/tvm/te/schedule.py                |   7 --
 topi/python/topi/__init__.py             |   1 -
 topi/python/topi/opengl/__init__.py      |  26 -------
 topi/python/topi/opengl/conv2d_nchw.py   |  73 -------------------
 topi/python/topi/opengl/dense.py         |  67 -----------------
 topi/python/topi/opengl/injective.py     |  62 ----------------
 topi/python/topi/opengl/pooling.py       | 121 -------------------------------
 topi/python/topi/opengl/softmax.py       |  58 ---------------
 topi/python/topi/testing/common.py       |   2 -
 topi/tests/python/test_topi_dense.py     |   1 -
 topi/tests/python/test_topi_softmax.py   |   1 -
 20 files changed, 3 insertions(+), 540 deletions(-)

diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py
index b9fbcb2..6db8655 100644
--- a/python/tvm/__init__.py
+++ b/python/tvm/__init__.py
@@ -30,7 +30,7 @@ from ._ffi import register_object, register_func, register_extension, get_global
 # tvm.runtime
 from .runtime.object import Object
 from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
-from .runtime.ndarray import vpi, rocm, opengl, ext_dev, micro_dev, hexagon
+from .runtime.ndarray import vpi, rocm, ext_dev, micro_dev, hexagon
 from .runtime import ndarray as nd
 
 # tvm.error
diff --git a/python/tvm/_ffi/runtime_ctypes.py b/python/tvm/_ffi/runtime_ctypes.py
index 0d6e5ac..db89854 100644
--- a/python/tvm/_ffi/runtime_ctypes.py
+++ b/python/tvm/_ffi/runtime_ctypes.py
@@ -143,7 +143,6 @@ class TVMContext(ctypes.Structure):
         8 : 'metal',
         9 : 'vpi',
         10: 'rocm',
-        11: 'opengl',
         12: 'ext_dev',
         13: 'micro_dev',
         14: 'hexagon',
@@ -166,7 +165,6 @@ class TVMContext(ctypes.Structure):
         'metal': 8,
         'vpi': 9,
         'rocm': 10,
-        'opengl': 11,
         'ext_dev': 12,
         'micro_dev': 13,
         'hexagon': 14,
diff --git a/python/tvm/relay/op/strategy/__init__.py b/python/tvm/relay/op/strategy/__init__.py
index 59adf82..8d0543b 100644
--- a/python/tvm/relay/op/strategy/__init__.py
+++ b/python/tvm/relay/op/strategy/__init__.py
@@ -26,6 +26,5 @@ from . import cuda
 from . import hls
 from . import mali
 from . import bifrost
-from . import opengl
 from . import rocm
 from . import intel_graphics
diff --git a/python/tvm/relay/op/strategy/opengl.py b/python/tvm/relay/op/strategy/opengl.py
deleted file mode 100644
index 12c288c..0000000
--- a/python/tvm/relay/op/strategy/opengl.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Definition of OpenGL operator strategy."""
-# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
-from .generic import *
-from .. import op as _op
-
-@schedule_injective.register("opengl")
-def schedule_injective_opengl(attrs, outs, target):
-    """schedule injective ops for opengl"""
-    with target:
-        return topi.opengl.schedule_injective(outs)
-
-@schedule_concatenate.register("opengl")
-def schedule_concatenate_opengl(attrs, outs, target):
-    """schedule concatenate for opengl"""
-    with target:
-        return topi.opengl.schedule_injective(outs)
-
-@schedule_pool.register("opengl")
-def schedule_pool_opengl(attrs, outs, target):
-    """schedule pooling ops for opengl"""
-    with target:
-        return topi.opengl.schedule_pool(outs, attrs.layout)
-
-@schedule_adaptive_pool.register("opengl")
-def schedule_adaptive_pool_opengl(attrs, outs, target):
-    """schedule adative pooling ops for opengl"""
-    with target:
-        return topi.opengl.schedule_adaptive_pool(outs)
-
-@softmax_strategy.register("opengl")
-def softmax_strategy_opengl(attrs, inputs, out_type, target):
-    """softmax opengl strategy"""
-    strategy = _op.OpStrategy()
-    strategy.add_implementation(
-        wrap_compute_softmax(topi.nn.softmax),
-        wrap_topi_schedule(topi.opengl.schedule_softmax),
-        name="softmax.opengl")
-    return strategy
-
-@schedule_log_softmax.register("opengl")
-def schedule_log_softmax_opengl(attrs, outs, target):
-    """schedule log_softmax for opengl"""
-    with target:
-        return topi.opengl.schedule_softmax(outs)
-
-@conv2d_strategy.register("opengl")
-def conv2d_strategy_opengl(attrs, inputs, out_type, target):
-    """conv2d opengl strategy"""
-    strategy = _op.OpStrategy()
-    groups = attrs.groups
-    layout = attrs.data_layout
-    assert groups == 1, "Don't support group conv2d on OpenGL"
-    assert layout == "NCHW", "Only support conv2d layout NCHW for OpenGL"
-    strategy.add_implementation(wrap_compute_conv2d(topi.nn.conv2d),
-                                wrap_topi_schedule(topi.opengl.schedule_conv2d_nchw),
-                                name="conv2d_nchw.opengl")
-    return strategy
-
-@dense_strategy.register("opengl")
-def dense_strategy_opengl(attrs, inputs, out_type, target):
-    """dense opengl strategy"""
-    strategy = _op.OpStrategy()
-    strategy.add_implementation(wrap_compute_dense(topi.nn.dense),
-                                wrap_topi_schedule(topi.opengl.schedule_dense),
-                                name="dense.opengl")
-    return strategy
diff --git a/python/tvm/rpc/client.py b/python/tvm/rpc/client.py
index 3f38c4f..2f96c9b 100644
--- a/python/tvm/rpc/client.py
+++ b/python/tvm/rpc/client.py
@@ -182,10 +182,6 @@ class RPCSession(object):
         """Construct Metal device."""
         return self.context(8, dev_id)
 
-    def opengl(self, dev_id=0):
-        """Construct OpenGL device."""
-        return self.context(11, dev_id)
-
     def ext_dev(self, dev_id=0):
         """Construct extension device."""
         return self.context(12, dev_id)
diff --git a/python/tvm/runtime/__init__.py b/python/tvm/runtime/__init__.py
index 235ef0c..59574e6 100644
--- a/python/tvm/runtime/__init__.py
+++ b/python/tvm/runtime/__init__.py
@@ -26,6 +26,6 @@ from .module import Module
 # function exposures
 from .object_generic import convert_to_object, convert, const
 from .ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
-from .ndarray import vpi, rocm, opengl, ext_dev, micro_dev
+from .ndarray import vpi, rocm, ext_dev, micro_dev
 from .module import load_module, enabled, system_lib
 from .container import String
diff --git a/python/tvm/runtime/ndarray.py b/python/tvm/runtime/ndarray.py
index 9f5f0f6..6629cc6 100644
--- a/python/tvm/runtime/ndarray.py
+++ b/python/tvm/runtime/ndarray.py
@@ -409,22 +409,6 @@ def vulkan(dev_id=0):
     return TVMContext(7, dev_id)
 
 
-def opengl(dev_id=0):
-    """Construct a OpenGL device
-
-    Parameters
-    ----------
-    dev_id : int, optional
-        The integer device id
-
-    Returns
-    -------
-    ctx : TVMContext
-        The created context
-    """
-    return TVMContext(11, dev_id)
-
-
 def ext_dev(dev_id=0):
     """Construct a extension device
 
diff --git a/python/tvm/target/__init__.py b/python/tvm/target/__init__.py
index eac939b..2553fed 100644
--- a/python/tvm/target/__init__.py
+++ b/python/tvm/target/__init__.py
@@ -55,7 +55,7 @@ We can use :py:func:`tvm.target.create` to create a tvm.target.Target from the t
 We can also use other specific function in this module to create specific targets.
 """
 from .target import Target, create
-from .target import cuda, rocm, mali, intel_graphics, opengl, arm_cpu, rasp, vta, bifrost, hexagon
+from .target import cuda, rocm, mali, intel_graphics, arm_cpu, rasp, vta, bifrost, hexagon
 from .generic_func import GenericFunc
 from .generic_func import generic_func, get_native_generic_func, override_native_generic_func
 from . import datatype
diff --git a/python/tvm/target/target.py b/python/tvm/target/target.py
index fd15ff9..3335e12 100644
--- a/python/tvm/target/target.py
+++ b/python/tvm/target/target.py
@@ -172,18 +172,6 @@ def intel_graphics(model='unknown', options=None):
     return _ffi_api.TargetCreate("opencl", *opts)
 
 
-def opengl(model='unknown', options=None):
-    """Returns a OpenGL target.
-
-    Parameters
-    ----------
-    options : str or list of str
-        Additional options
-    """
-    opts = _merge_opts(["-model=%s" % model], options)
-    return _ffi_api.TargetCreate("opengl", *opts)
-
-
 def arm_cpu(model='unknown', options=None):
     """Returns a ARM CPU target.
     This function will also download pre-tuned op parameters when there is none.
diff --git a/python/tvm/te/schedule.py b/python/tvm/te/schedule.py
index f8bbe09..b611954 100644
--- a/python/tvm/te/schedule.py
+++ b/python/tvm/te/schedule.py
@@ -509,13 +509,6 @@ class Stage(Object):
         """
         _ffi_api.StageDoubleBuffer(self)
 
-    def opengl(self):
-        """The special OpenGL schedule
-
-        Maps each output element to a pixel.
-        """
-        _ffi_api.StageOpenGL(self)
-
 
 @tvm._ffi.register_object
 class SpecializedCondition(Object):
diff --git a/topi/python/topi/__init__.py b/topi/python/topi/__init__.py
index f1019e6..2f06f4e 100644
--- a/topi/python/topi/__init__.py
+++ b/topi/python/topi/__init__.py
@@ -48,7 +48,6 @@ from . import arm_cpu
 from . import mali
 from . import bifrost
 from . import intel_graphics
-from . import opengl
 from . import util
 from . import rocm
 from . import vision
diff --git a/topi/python/topi/opengl/__init__.py b/topi/python/topi/opengl/__init__.py
deleted file mode 100644
index 0ddbea0..0000000
--- a/topi/python/topi/opengl/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: disable=redefined-builtin, wildcard-import
-"""CUDA specific declaration and schedules."""
-from __future__ import absolute_import as _abs
-
-from .conv2d_nchw import schedule_conv2d_nchw
-from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
-from .softmax import schedule_softmax
-from .dense import schedule_dense
-from .pooling import schedule_pool, schedule_adaptive_pool
diff --git a/topi/python/topi/opengl/conv2d_nchw.py b/topi/python/topi/opengl/conv2d_nchw.py
deleted file mode 100644
index c93bcc2..0000000
--- a/topi/python/topi/opengl/conv2d_nchw.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#pylint: disable=invalid-name, no-member, too-many-locals, too-many-statements, too-many-arguments, too-many-branches, line-too-long
-"""Schedule for conv2d_nchw with auto fusion"""
-import tvm
-from tvm import te
-from .. import tag
-
-def schedule_conv2d_nchw(outs):
-    """Schedule for conv2d_nchw.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-        The computation graph description of conv2d_nchw
-        in the format of an array of tensors.
-
-    Returns
-    -------
-    s: Schedule
-        The computation schedule for conv2d_nchw.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-    scheduled_ops = []
-
-    def _schedule(conv2d, data):
-        if conv2d.op in s.outputs:
-            Out = conv2d
-        else:
-            Out = outs[0].op.output(0)
-            s[conv2d].opengl()
-        s[Out].opengl()
-        s[data].opengl()
-
-    def traverse(OP):
-        """Internal traverse function"""
-        # inline all one-to-one-mapping operators except the last stage (output)
-        if tag.is_broadcast(OP.tag):
-            if OP not in s.outputs:
-                s[OP].opengl()
-            for tensor in OP.input_tensors:
-                if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
-                    traverse(tensor.op)
-        # schedule conv2d_nchw
-        elif OP.tag.startswith('conv2d_nchw'):
-            conv2d = OP.output(0)
-            data = OP.input_tensors[0]
-            kernel = OP.input_tensors[1]
-            if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
-                s[kernel].compute_inline()
-            _schedule(conv2d, data)
-        else:
-            raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
-        scheduled_ops.append(OP)
-
-    traverse(outs[0].op)
-    return s
diff --git a/topi/python/topi/opengl/dense.py b/topi/python/topi/opengl/dense.py
deleted file mode 100644
index 715f713..0000000
--- a/topi/python/topi/opengl/dense.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable
-"""Schedule for dense operator"""
-from tvm import te
-from .. import tag
-
-def schedule_dense(outs):
-    """Schedule for dense operator.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-        The computation graph description of dense
-        in the format of an array of tensors.
-
-    Returns
-    -------
-    s: Schedule
-        The computation schedule for dense.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-    scheduled_ops = []
-
-    def _schedule(Dense):
-        if Dense.op in s.outputs:
-            Out = Dense
-        else:
-            Out = outs[0].op.output(0)
-            s[Dense].opengl()
-        s[Out].opengl()
-
-    def traverse(OP):
-        """Internal traverse function"""
-        # inline all one-to-one-mapping operators except the last stage (output)
-        if tag.is_broadcast(OP.tag):
-            if OP not in s.outputs:
-                s[OP].compute_inline()
-            for tensor in OP.input_tensors:
-                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
-                    traverse(tensor.op)
-        # schedule dense
-        elif OP.tag == 'dense':
-            Dense = OP.output(0)
-            _schedule(Dense)
-        else:
-            raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
-        scheduled_ops.append(OP)
-
-    traverse(outs[0].op)
-    return s
diff --git a/topi/python/topi/opengl/injective.py b/topi/python/topi/opengl/injective.py
deleted file mode 100644
index a5944f7..0000000
--- a/topi/python/topi/opengl/injective.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable,
-"""Schedule for composition of injective operator"""
-from tvm import te
-
-def schedule_injective_from_existing(sch, out):
-    """Schedule for injective op from existing schedule.
-
-    Parameters
-    ----------
-    sch: Schedule
-         The schedule to update.
-    out: Tensor
-         The tensor representing the injective op.
-
-    Returns
-    -------
-    sch: Schedule
-         The updated schedule.
-    """
-    sch[out].opengl()
-    return sch
-
-def schedule_injective(outs):
-    """Schedule for injective op.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-          The computation graph description of injective in the format
-          of an array of tensors.
-
-    Returns
-    -------
-    sch: Schedule
-        The computation schedule for the op.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-
-    te.schedule.AutoInlineInjective(s)
-    for out in outs:
-        schedule_injective_from_existing(s, out)
-    return s
-
-schedule_elemwise = schedule_injective
-schedule_broadcast = schedule_injective
diff --git a/topi/python/topi/opengl/pooling.py b/topi/python/topi/opengl/pooling.py
deleted file mode 100644
index c30389c..0000000
--- a/topi/python/topi/opengl/pooling.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument
-"""Schedule for pooling operators"""
-from tvm import te
-from .. import tag
-
-def schedule_adaptive_pool(outs):
-    """Schedule for adaptive pool.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-        The computation graph description of global_pool
-        in the format of an array of tensors.
-
-    Returns
-    -------
-    s: Schedule
-        The computation schedule for adaptive pool.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-    scheduled_ops = []
-
-    def _schedule(Pool):
-        if Pool.op in s.outputs:
-            Out = Pool
-        else:
-            Out = outs[0].op.output(0)
-            s[Pool].opengl()
-        s[Out].opengl()
-
-    def traverse(OP):
-        """Internal traverse function"""
-        # inline all one-to-one-mapping operators except the last stage (output)
-        if tag.is_broadcast(OP.tag):
-            if OP not in s.outputs:
-                s[OP].opengl()
-            for tensor in OP.input_tensors:
-                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
-                    traverse(tensor.op)
-        # schedule global_pool
-        elif OP.tag.startswith('adaptive_pool'):
-            Pool = OP.output(0)
-            _schedule(Pool)
-        else:
-            raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
-        scheduled_ops.append(OP)
-
-    traverse(outs[0].op)
-    return s
-
-
-def schedule_pool(outs, layout):
-    """Schedule for pool.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-        The computation graph description of pool
-        in the format of an array of tensors.
-
-    layout: str
-        Data layout.
-
-    Returns
-    -------
-    s: Schedule
-        The computation schedule for pool.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-    scheduled_ops = []
-
-    def _schedule(PaddedInput, Pool):
-        if isinstance(PaddedInput.op, te.tensor.ComputeOp):
-            s[PaddedInput].opengl()
-        if Pool.op in s.outputs:
-            Out = Pool
-        else:
-            Out = outs[0].op.output(0)
-            s[Pool].opengl()
-        s[Out].opengl()
-
-    def traverse(OP):
-        """Internal traverse function"""
-        # inline all one-to-one-mapping operators except the last stage (output)
-        if tag.is_broadcast(OP.tag):
-            if OP not in s.outputs:
-                s[OP].compute_inline()
-            for tensor in OP.input_tensors:
-                if tensor.op not in scheduled_ops and isinstance(tensor.op, te.tensor.ComputeOp):
-                    traverse(tensor.op)
-        # schedule pool
-        elif OP.tag.startswith('pool'):
-            PaddedInput = OP.input_tensors[0]
-            Pool = OP.output(0)
-            _schedule(PaddedInput, Pool)
-        else:
-            raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
-        scheduled_ops.append(OP)
-
-    traverse(outs[0].op)
-    return s
diff --git a/topi/python/topi/opengl/softmax.py b/topi/python/topi/opengl/softmax.py
deleted file mode 100644
index 7b15a53..0000000
--- a/topi/python/topi/opengl/softmax.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable, trailing-whitespace
-"""Schedule for softmax operator"""
-from tvm import te
-
-def schedule_softmax(outs):
-    """Schedule for softmax op.
-
-    Parameters
-    ----------
-    outs: Array of Tensor
-          The computation graph description of softmax in the format
-          of an array of tensors.
-
-    Returns
-    -------
-    sch: Schedule
-        The computation schedule for the op.
-    """
-    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    s = te.create_schedule([x.op for x in outs])
-    softmax = outs[0]
-
-    op_tag = softmax.op.tag
-    if op_tag == 'softmax_output':
-        expsum = softmax.op.input_tensors[1]
-        exp = softmax.op.input_tensors[0]
-        max_elem = s[exp].op.input_tensors[1]
-    elif op_tag == 'log_softmax_output':
-        exp = None
-        max_elem = softmax.op.input_tensors[1]
-        expsum = softmax.op.input_tensors[2]
-    else:
-        raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
-                         Got {0}'.format(op_tag))
-
-    if exp is not None:
-        s[exp].opengl()
-
-    s[max_elem].opengl()
-    s[expsum].opengl()
-    s[softmax].opengl()
-    return s
diff --git a/topi/python/topi/testing/common.py b/topi/python/topi/testing/common.py
index 5817513..7bc5c5d 100644
--- a/topi/python/topi/testing/common.py
+++ b/topi/python/topi/testing/common.py
@@ -26,7 +26,6 @@ _injective_schedule = {
     "arm_cpu": topi.arm_cpu.schedule_injective,
     "gpu": topi.cuda.schedule_injective,
     "hls": topi.hls.schedule_injective,
-    "opengl": topi.opengl.schedule_injective
 }
 
 _reduce_schedule = {
@@ -64,7 +63,6 @@ _conv2d_nchw_implement = {
              topi.mali.schedule_conv2d_nchw_spatial_pack),
     "bifrost": (topi.bifrost.conv2d_nchw_spatial_pack,
                 topi.bifrost.schedule_conv2d_nchw_spatial_pack),
-    "opengl": (topi.nn.conv2d_nchw, topi.opengl.schedule_conv2d_nchw),
     "intel_graphics": (topi.intel_graphics.conv2d_nchw,
                        topi.intel_graphics.schedule_conv2d_nchw),
     "hls": (topi.nn.conv2d_nchw, topi.hls.schedule_conv2d_nchw)
diff --git a/topi/tests/python/test_topi_dense.py b/topi/tests/python/test_topi_dense.py
index 7498c00..6294c7d 100644
--- a/topi/tests/python/test_topi_dense.py
+++ b/topi/tests/python/test_topi_dense.py
@@ -33,7 +33,6 @@ _dense_implement = {
             (topi.cuda.dense_large_batch, topi.cuda.schedule_dense_large_batch)],
     "mali": [(topi.mali.dense, topi.mali.schedule_dense)],
     "bifrost": [(topi.bifrost.dense, topi.bifrost.schedule_dense)],
-    "opengl": [(topi.nn.dense, topi.opengl.schedule_dense)],
     "rocm": [(topi.rocm.dense, topi.rocm.schedule_dense)],
     "hls": [(topi.nn.dense, topi.hls.schedule_dense)],
 }
diff --git a/topi/tests/python/test_topi_softmax.py b/topi/tests/python/test_topi_softmax.py
index 4857387..e213074 100644
--- a/topi/tests/python/test_topi_softmax.py
+++ b/topi/tests/python/test_topi_softmax.py
@@ -31,7 +31,6 @@ _softmax_schedule = {
     "cpu": topi.x86.schedule_softmax,
     "gpu": topi.cuda.schedule_softmax,
     "hls": topi.hls.schedule_softmax,
-    "opengl": topi.opengl.schedule_softmax,
 }
 
 def check_device(A, B, a_np, b_np, device, name):


Mime
View raw message