From: GitBox
To: commits@tvm.apache.org
Reply-To: dev@tvm.apache.org
Subject: [GitHub] [incubator-tvm] jwfromm commented on a change in pull request #5186: [Relay][Topi][AutoTVM] Winograd support for Conv3D
Date: Sat, 04 Apr 2020 22:14:05 -0000

jwfromm commented on a change in pull request #5186: [Relay][Topi][AutoTVM] Winograd support for Conv3D
URL: https://github.com/apache/incubator-tvm/pull/5186#discussion_r403539442

########## File path: topi/python/topi/cuda/conv3d_winograd.py ##########

@@ -0,0 +1,627 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name,unused-variable,unused-argument
+"""Winograd template for cuda backend"""
+
+import logging
+import tvm
+from tvm import te
+from tvm import autotvm
+
+from .. import nn
+from ..util import get_const_int, get_const_tuple, traverse_inline, simplify
+from ..nn.winograd_util import winograd_transform_matrices
+
+logger = logging.getLogger('conv3d_winograd')
+
+
+def _infer_tile_size(data, kernel):
+    N, CI, D, H, W = get_const_tuple(data.shape)
+
+    if H % 8 == 0:
+        return 4
+    return 2
+
+
+def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed):
+    """Compute declaration for winograd"""
+    tile_size = _infer_tile_size(data, kernel)
+
+    N, CI, D, H, W = get_const_tuple(data.shape)
+
+    if isinstance(dilation, int):
+        dilation_d = dilation_h = dilation_w = dilation
+    else:
+        dilation_d, dilation_h, dilation_w = dilation
+    DSTR, HSTR, WSTR = (strides, strides, strides) if isinstance(strides, int) else strides
+
+    if not pre_computed:  # kernel tensor is raw tensor, do strict check
+        if dilation_d != 1 or dilation_h != 1 or dilation_w != 1:
+            kernel = nn.dilate(kernel, (1, 1, dilation_d, dilation_h, dilation_w))
+        CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
+        alpha = KW + tile_size - 1
+        assert DSTR == 1 and HSTR == 1 and WSTR == 1 and KD == KH and KH == KW
+    else:
+        # kernel tensor is pre-transformed. this op is created by alter op layout.
+        # dilation is not supported
+        alpha, _, _, CI, CO = get_const_tuple(kernel.shape)
+        KD = KH = KW = alpha + 1 - tile_size
+        assert DSTR == 1 and HSTR == 1 and WSTR == 1 and \
+            dilation_d == 1 and dilation_h == 1 and dilation_w == 1
+
+    pf, pt, pl, pb, pd, pr = nn.get_pad_tuple3d(padding, (KD, KH, KW))
+    data_pad = nn.pad(data, (0, 0, pf, pt, pl), (0, 0, pb, pd, pr), name="data_pad")
+
+    r = KW
+    m = tile_size
+    A, B, G = winograd_transform_matrices(m, r, out_dtype)
+
+    D = (D + pf + pb - KD) // DSTR + 1
+    H = (H + pt + pd - KH) // HSTR + 1
+    W = (W + pl + pr - KW) // WSTR + 1
+    nD, nH, nW = (D + m - 1) // m, (H + m - 1) // m, (W + m - 1) // m
+    P = N * nD * nH * nW
+
+    # transform kernel
+    if not pre_computed:
+        # Check if we are currently tuning, if so we want to avoid counting
+        # prepacking in time costs. Just use a placeholder with the packed shape instead.
+        if autotvm.GLOBAL_SCOPE.in_tuning:
+            kernel_pack = te.placeholder((alpha, alpha, alpha, CI, CO),
+                                         dtype=kernel.dtype,
+                                         name='kernel_pack')
+        else:
+            r_kd = te.reduce_axis((0, KD), name='r_kd')
+            r_kh = te.reduce_axis((0, KH), name='r_kh')
+            r_kw = te.reduce_axis((0, KW), name='r_kw')
+            kernel_pack = te.compute(
+                (alpha, alpha, alpha, CI, CO),
+                lambda omg, eps, nu, ci, co: te.sum(
+                    kernel[co][ci][r_kd][r_kh][r_kw] * G[omg][r_kd] * G[eps][r_kh] * G[nu][r_kw],
+                    axis=[r_kd, r_kh, r_kw]),
+                name='kernel_pack')
+    else:
+        kernel_pack = kernel
+
+    idxdiv = tvm.tir.indexdiv
+    idxmod = tvm.tir.indexmod
+    # pack input tile
+    input_tile = te.compute((CI, P, alpha, alpha, alpha),
+                            lambda c, p, omg, eps, nu: data_pad[idxdiv(p, (nD * nH * nW))]
+                            [c]
+                            [idxmod(idxdiv(p, nH * nW), nD) * m + omg]
+                            [idxmod(idxdiv(p, nW), nH) * m + eps]
+                            [idxmod(p, nW) * m + nu],
+                            name='d')
+
+    # transform data
+    r_a = te.reduce_axis((0, alpha), 'r_a')
+    r_b = te.reduce_axis((0, alpha), 'r_b')
+    r_c = te.reduce_axis((0, alpha), 'r_c')
+    data_pack = te.compute(
+        (alpha, alpha, alpha, CI, P),
+        lambda omg, eps, nu, ci, p: te.sum(
+            input_tile[ci][p][r_a][r_b][r_c] * B[r_a][omg] * B[r_b][eps] * B[r_c][nu],
+            axis=[r_a, r_b, r_c]),
+        name='data_pack')
+
+    # do batch gemm
+    ci = te.reduce_axis((0, CI), name='ci')
+    bgemm = te.compute(
+        (alpha, alpha, alpha, CO, P),
+        lambda omg, eps, nu, co, p: te.sum(
+            kernel_pack[omg][eps][nu][ci][co] * data_pack[omg][eps][nu][ci][p], axis=[ci]),

Review comment:
   My latest commit includes the `ci` `co` axes swap and it does seem to slightly improve performance.
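The shape bookkeeping in the compute definition above (`alpha = KW + tile_size - 1`, the padded output extents, and `P = N * nD * nH * nW` input tiles) can be checked in isolation. The sketch below is not part of the PR; the input shape, output-channel count, kernel size, and symmetric padding of 1 are hypothetical values chosen only to make the arithmetic concrete.

```python
# Stand-alone sketch of the tiling arithmetic in winograd_cuda.
# All concrete numbers below are assumptions for illustration only.
N, CI, D, H, W = 1, 64, 16, 16, 16       # hypothetical NCDHW input
CO = 32                                  # hypothetical output channels
KD = KH = KW = 3                         # hypothetical cubic kernel
DSTR = HSTR = WSTR = 1                   # the winograd path requires unit strides

tile_size = 4 if H % 8 == 0 else 2       # mirrors _infer_tile_size
alpha = KW + tile_size - 1               # transform size: 6
m = tile_size

pf = pt = pl = pb = pd = pr = 1          # assumed symmetric padding

D_out = (D + pf + pb - KD) // DSTR + 1   # 16
H_out = (H + pt + pd - KH) // HSTR + 1   # 16
W_out = (W + pl + pr - KW) // WSTR + 1   # 16
nD = (D_out + m - 1) // m                # 4 tiles along depth
nH = (H_out + m - 1) // m                # 4 tiles along height
nW = (W_out + m - 1) // m                # 4 tiles along width
P = N * nD * nH * nW                     # 64 input tiles in total

print("kernel_pack:", (alpha, alpha, alpha, CI, CO))  # (6, 6, 6, 64, 32)
print("data_pack:  ", (alpha, alpha, alpha, CI, P))   # (6, 6, 6, 64, 64)
print("bgemm:      ", (alpha, alpha, alpha, CO, P))   # (6, 6, 6, 32, 64)
```

The sketch only checks tensor extents; the `ci`/`co` ordering discussed in the review comment changes the axis order and access pattern of `kernel_pack` in the batch gemm, not the amount of work these shapes imply.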