超神经HyperAI · January 6

[TVM Tutorial] Tuning High-Performance Convolution on NVIDIA GPUs (Part 1)

Apache TVM is an end-to-end deep learning compiler framework for CPUs, GPUs, and a variety of machine learning accelerator chips. More TVM documentation in Chinese is available at → https://tvm.hyper.ai/

Author: Lianmin Zheng

This tutorial explains how to write a high-performance tunable template for NVIDIA GPUs. By running an auto-tuner on this template, we can outperform the vendor-provided cuDNN library in many cases.

Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, wrap the body of this tutorial in an if __name__ == "__main__": block.
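For reference, a minimal sketch of that structure:

if __name__ == "__main__":
    # place the body of this tutorial (everything below the imports) here
    ...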

Install Dependencies

To use the autotvm package in TVM, we need to install some extra dependencies (change "3" to "2" if you use Python 2):

pip3 install --user psutil xgboost tornado cloudpickle

To make TVM run faster during tuning, it is recommended to use Cython as TVM's FFI. In the root directory of TVM, execute:

pip3 install --user cython
sudo make cython3

Import the packages in Python code:

import logging
import sys
import numpy as np

import tvm
from tvm import te, topi, testing
from tvm.topi.testing import conv2d_nchw_python
import tvm.testing

from tvm import autotvm

Step 1: Define the Search Space

There are plenty of useful schedule primitives in TVM, and detailed tutorials describe them, such as (1) How to optimize convolution on GPU and (2) Optimizing DepthwiseConv on NVIDIA GPU.

However, their implementations are manually tuned for a few specific input shapes. This section builds a search space large enough to cover the techniques used in those tutorials, and then relies on an efficient auto-tuner to search the space and pick suitable configurations.

Developers familiar with CUDA schedules will find the following generic template familiar. It can be modified to tune other operators, such as depthwise convolution and GEMM. To fully understand the template, you should be familiar with the schedule primitives and the auto-tuning API; refer to the tutorials above and the AutoTVM tutorial.

Note that the search space for the conv2d operator can be very large (at the level of 10^9 for some input shapes).

@autotvm.template("tutorial/conv2d_no_batching")
def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding):
    assert N == 1, "Only consider batch_size = 1 in this template"

    data = te.placeholder((N, CI, H, W), name="data")
    kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
    conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32")
    s = te.create_schedule([conv.op])

    #### define space begin ####
    n, f, y, x = s[conv].op.axis
    rc, ry, rx = s[conv].op.reduce_axis

    cfg = autotvm.get_config()
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=3)
    cfg.define_split("tile_ry", ry, num_outputs=3)
    cfg.define_split("tile_rx", rx, num_outputs=3)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    #### define space end ####

    # inline padding
    pad_data = s[conv].op.input_tensors[0]
    s[pad_data].compute_inline()
    data, raw_data = pad_data, data

    output = conv
    OL = s.cache_write(conv, "local")

    # create cache stages
    AA = s.cache_read(data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    AL = s.cache_read(AA, "local", [OL])
    WL = s.cache_read(WW, "local", [OL])

    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    kernel_scope = n  # this is the scope to attach global config inside this kernel

    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(n, bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, f, y, x = s[OL].op.axis
    rc, ry, rx = s[OL].op.reduce_axis
    rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
    ryo, rym, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxm, rxi = cfg["tile_rx"].apply(s, OL, rx)
    s[OL].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, n, f, y, x)

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    s[AL].compute_at(s[OL], rxm)
    s[WL].compute_at(s[OL], rxm)

    # cooperative fetching
    for load in [AA, WW]:
        n, f, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))

    # tune unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    return s, [raw_data, kernel, conv]
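
As a quick sanity check on the note above about the size of the search space, the total number of configurations is just the product of the number of choices for every knob. The following back-of-the-envelope sketch counts the split choices for the resnet layer used in Step 2 (CO = CI = 512, H = W = 7, KH = KW = 3) and reproduces the ConfigSpace length printed there:

def count_splits(product, parts):
    # count the ordered factorizations of `product` into `parts` factors (factors of 1 allowed)
    if parts == 1:
        return 1
    return sum(
        count_splits(product // d, parts - 1) for d in range(1, product + 1) if product % d == 0
    )

space_size = (
    count_splits(512, 4)    # tile_f over CO = 512
    * count_splits(7, 4)    # tile_y over H = 7
    * count_splits(7, 4)    # tile_x over W = 7
    * count_splits(512, 3)  # tile_rc over CI = 512
    * count_splits(3, 3)    # tile_ry over KH = 3
    * count_splits(3, 3)    # tile_rx over KW = 3
    * 3                     # auto_unroll_max_step
    * 2                     # unroll_explicit
)
print(space_size)  # 10454400 for this shape; larger feature maps and channel counts push this toward 10^9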

Step 2: Search through the Space

We pick the last layer of resnet as the test case. Since the space is large enough, XGBoostTuner is the most suitable tuner here. We only run a few trials for demonstration; in practice, around 1000 trials can usually find better kernels for this template.

# logging config (for printing tuning log to the screen)
logging.getLogger("autotvm").setLevel(logging.DEBUG)
logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout))

# the last layer in resnet
N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
task = autotvm.task.create(
    "tutorial/conv2d_no_batching", args=(N, H, W, CO, CI, KH, KW, strides, padding), target="cuda"
)
print(task.config_space)

# Use the local GPU and measure each config multiple times to reduce variance.
# The timeout for compiling a program is 10 seconds; the timeout for running is 4 seconds.
measure_option = autotvm.measure_option(
    builder=autotvm.LocalBuilder(),
    runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)

record_file = None
# Begin tuning and log the records to the file `conv2d.log`.
# Many invalid configs will be tried during tuning, so you should expect to
# see many error reports. As long as you see non-zero GFLOPS, it is fine.
# We do not run the tuning on our server because it is too time-consuming;
# uncomment the lines below to run it yourself.

# tuner = autotvm.tuner.XGBTuner(task)
# record_file = "conv2d.log"
# tuner.tune(
#     n_trial=5,
#     measure_option=measure_option,
#     callbacks=[autotvm.callback.log_to_file(record_file)],
# )

Output:

ConfigSpace (len=10454400, space_map=
   0 tile_f: Split(policy=factors, product=512, num_outputs=4) len=220
   1 tile_y: Split(policy=factors, product=7, num_outputs=4) len=4
   2 tile_x: Split(policy=factors, product=7, num_outputs=4) len=4
   3 tile_rc: Split(policy=factors, product=512, num_outputs=3) len=55
   4 tile_ry: Split(policy=factors, product=3, num_outputs=3) len=3
   5 tile_rx: Split(policy=factors, product=3, num_outputs=3) len=3
   6 auto_unroll_max_step: OtherOption([0, 512, 1500]) len=3
   7 unroll_explicit: OtherOption([0, 1]) len=2
)
waiting for device...
device available
Get devices for measurement successfully!
No: 1   GFLOPS: 0.00/0.00       result: Traceback (most recent call last):
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 588, in __call__
    func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 540, in _build_func_common
    func = build(s, args, target_host=task.target_host, runtime=runtime)
  File "/workspace/python/tvm/driver/build_module.py", line 228, in build
    input_mod = lower(inputs, args, name=name, binds=binds)
  File "/workspace/python/tvm/driver/build_module.py", line 134, in lower
    return ffi.lower_schedule(inp, args, name, binds, simple_mode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel

Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel        [('tile_f', [-1, 32, 8, 2]), ('tile_y', [-1, 1, 1, 1]), ('tile_x', [-1, 7, 1, 1]), ('tile_rc', [-1, 8, 64]), ('tile_ry', [-1, 3, 1]), ('tile_rx', [-1, 3, 1]), ('auto_unroll_max_step', 0), ('unroll_explicit', 1)],None,6171524
No: 2   GFLOPS: 0.00/0.00       result: Traceback (most recent call last):
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 588, in __call__
    func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 540, in _build_func_common
    func = build(s, args, target_host=task.target_host, runtime=runtime)
  File "/workspace/python/tvm/driver/build_module.py", line 228, in build
    input_mod = lower(inputs, args, name=name, binds=binds)
  File "/workspace/python/tvm/driver/build_module.py", line 134, in lower
    return ffi.lower_schedule(inp, args, name, binds, simple_mode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel

Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel        [('tile_f', [-1, 128, 1, 4]), ('tile_y', [-1, 1, 1, 1]), ('tile_x', [-1, 1, 1, 1]), ('tile_rc', [-1, 4, 128]), ('tile_ry', [-1, 1, 1]), ('tile_rx', [-1, 3, 1]), ('auto_unroll_max_step', 512), ('unroll_explicit', 0)],None,2502827
No: 3   GFLOPS: 0.00/0.00       result: Traceback (most recent call last):
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 588, in __call__
    func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 540, in _build_func_common
    func = build(s, args, target_host=task.target_host, runtime=runtime)
  File "/workspace/python/tvm/driver/build_module.py", line 228, in build
    input_mod = lower(inputs, args, name=name, binds=binds)
  File "/workspace/python/tvm/driver/build_module.py", line 134, in lower
    return ffi.lower_schedule(inp, args, name, binds, simple_mode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel

Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel        [('tile_f', [-1, 4, 1, 32]), ('tile_y', [-1, 1, 1, 1]), ('tile_x', [-1, 1, 1, 7]), ('tile_rc', [-1, 512, 1]), ('tile_ry', [-1, 1, 3]), ('tile_rx', [-1, 1, 3]), ('auto_unroll_max_step', 512), ('unroll_explicit', 0)],None,3325707
No: 4   GFLOPS: 0.00/0.00       result: Traceback (most recent call last):
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 588, in __call__
    func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 540, in _build_func_common
    func = build(s, args, target_host=task.target_host, runtime=runtime)
  File "/workspace/python/tvm/driver/build_module.py", line 228, in build
    input_mod = lower(inputs, args, name=name, binds=binds)
  File "/workspace/python/tvm/driver/build_module.py", line 134, in lower
    return ffi.lower_schedule(inp, args, name, binds, simple_mode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel

Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel        [('tile_f', [-1, 8, 4, 2]), ('tile_y', [-1, 1, 1, 7]), ('tile_x', [-1, 1, 1, 1]), ('tile_rc', [-1, 4, 8]), ('tile_ry', [-1, 3, 1]), ('tile_rx', [-1, 1, 3]), ('auto_unroll_max_step', 1500), ('unroll_explicit', 0)],None,4942815
No: 5   GFLOPS: 0.00/0.00       result: Traceback (most recent call last):
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 588, in __call__
    func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 540, in _build_func_common
    func = build(s, args, target_host=task.target_host, runtime=runtime)
  File "/workspace/python/tvm/driver/build_module.py", line 228, in build
    input_mod = lower(inputs, args, name=name, binds=binds)
  File "/workspace/python/tvm/driver/build_module.py", line 134, in lower
    return ffi.lower_schedule(inp, args, name, binds, simple_mode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  24: TVMFuncCall
        at ../src/runtime/c_runtime_api.cc:477
  23: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  22: Call
        at ../include/tvm/runtime/packed_func.h:1213
  21: operator()
        at ../include/tvm/runtime/packed_func.h:1731
  20: unpack_call<tvm::IRModule, 5, tvm::<lambda(tvm::te::Schedule, const tvm::runtime::Array<tvm::runtime::ObjectRef>&, const tvm::runtime::String&, const tvm::runtime::Map<tvm::te::Tensor, tvm::tir::Buffer>&, bool)> >
        at ../include/tvm/runtime/packed_func.h:1671
  19: run<>
        at ../include/tvm/runtime/packed_func.h:1631
  18: run<tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  17: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  16: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  15: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1631
  14: run<tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_, tvm::runtime::TVMMovableArgValueWithContext_>
        at ../include/tvm/runtime/packed_func.h:1646
  13: operator()
        at ../src/driver/driver_api.cc:365
  12: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
        at ../src/driver/driver_api.cc:352
  11: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
        at ../src/driver/driver_api.cc:252
  10: tvm::transform::Pass::operator()(tvm::IRModule) const
        at ../src/ir/transform.cc:258
  9: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  8: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:453
  7: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/ir/transform.cc:274
  6: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
        at ../src/tir/ir/transform.cc:100
  5: tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::operator()(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext) const
        at ../include/tvm/runtime/packed_func.h:1750
  4: tvm::tir::PrimFunc tvm::runtime::detail::typed_packed_call_dispatcher<tvm::tir::PrimFunc>::run<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::runtime::PackedFunc const&, tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&)
        at ../include/tvm/runtime/packed_func.h:1694
  3: tvm::runtime::TVMRetValue tvm::runtime::PackedFunc::operator()<tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext>(tvm::tir::PrimFunc&&, tvm::IRModule&&, tvm::transform::PassContext&&) const
        at ../include/tvm/runtime/packed_func.h:1618
  2: tvm::runtime::PackedFuncObj::CallPacked(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
        at ../include/tvm/runtime/packed_func.h:1217
  1: Call
        at ../include/tvm/runtime/packed_func.h:1213
  0: operator()
        at ../src/runtime/c_runtime_api.cc:534
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/python/tvm/autotvm/measure/measure_methods.py", line 871, in verify_pass
    raise InstantiationError("Skipped because of invalid gpu kernel")
tvm.autotvm.task.space.InstantiationError: Skipped because of invalid gpu kernel
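
Once tuning has produced a log file, the usual AutoTVM workflow is to apply the best record from the log, build the kernel, and check it against the reference implementation. A minimal sketch of that step, assuming the commented-out tuning above was run with record_file = "conv2d.log":

# inspect the best config found during tuning
dispatch_context = autotvm.apply_history_best(record_file)
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)

# apply the history best from the log file and build the kernel
with autotvm.apply_history_best(record_file):
    with tvm.target.Target("cuda"):
        s, arg_bufs = conv2d_no_batching(N, H, W, CO, CI, KH, KW, strides, padding)
        func = tvm.build(s, arg_bufs)

# verify correctness against the NumPy reference implementation
a_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32)
w_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32)
c_np = conv2d_nchw_python(a_np, w_np, strides, padding)

dev = tvm.cuda()
a_tvm = tvm.nd.array(a_np, device=dev)
w_tvm = tvm.nd.array(w_np, device=dev)
c_tvm = tvm.nd.empty(c_np.shape, device=dev)
func(a_tvm, w_tvm, c_tvm)

tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-2)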
