From eabe79e6a30e6f67c92ac36f38c12a374d692a62 Mon Sep 17 00:00:00 2001 From: fengdaozhuo <52237830+grybd@users.noreply.github.com> Date: Sat, 1 Jan 2022 00:04:32 +0800 Subject: [PATCH] [Fix] graph support 0-Size tensor (#6957) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add nn.functional.glu graph test * add filter to modify functional autotest * modify code * add test example * add test else * add test judging condition for test_masked_fill.py,test_constant.py,test_tile.py,test_repeat.py,test_expand.py * add test ok example * Clear tensor name scope after graph build * Add test case of 2 graph caught same free eager tensor * auto format by CI * Dev cc clean tensor name scope (#7082) * Clear tensor name scope after graph build * Add test case of 2 graph caught same free eager tensor * auto format by CI Co-authored-by: chengtbf <472491134@qq.com> Co-authored-by: oneflow-ci-bot * submit test success example * test success example * submit test code * fix a bug about relu module with 0 shape data * fixed a bug about relu module with 0 shape data * fix a bug about relu module with 0 shape data * fix a bug about relu module with 0 shape data * 0shape and 0d autotest * fix a bug about relu module with 0 shape data * 0shape changed to 0_size * modify test_var.py * modify test_eye.py * modify test_reshape.py * modify test_.py * modify ReshapeFunctor * modify some file * Fixed graph autotest bug with reshape op test * Fixed graph autotest bug with reshape op test * fixed test_sub.py * modify test_sub.py * modify tensor_methods.cpp * modify array_functor.cpp * graph support 0-Size tensor * rename 0shape to 0 size * modified check_graph=True * fix and refine Co-authored-by: Zhenhua Co-authored-by: tangnana925 <85614052+tangnana925@users.noreply.github.com> Co-authored-by: tangnana Co-authored-by: Zhenhua <1209435+hengzi@users.noreply.github.com> Co-authored-by: chengtbf <472491134@qq.com> Co-authored-by: oneflow-ci-bot 
Co-authored-by: Xiaoyu Xu Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com> --- .../core/functional/impl/array_functor.cpp | 2 +- oneflow/core/job/plan_util.cpp | 2 +- oneflow/core/operator/interface_op_util.cpp | 2 +- python/oneflow/test/modules/test_abs.py | 2 +- .../oneflow/test/modules/test_activation.py | 24 +++++++++---------- python/oneflow/test/modules/test_add.py | 4 ++-- python/oneflow/test/modules/test_cast.py | 2 +- python/oneflow/test/modules/test_ceil.py | 4 ++-- python/oneflow/test/modules/test_clamp.py | 4 ++-- python/oneflow/test/modules/test_concat.py | 8 +++---- python/oneflow/test/modules/test_div.py | 4 ++-- python/oneflow/test/modules/test_eq.py | 4 ++-- python/oneflow/test/modules/test_expm1.py | 4 ++-- python/oneflow/test/modules/test_fmod.py | 4 ++-- python/oneflow/test/modules/test_greater.py | 4 ++-- python/oneflow/test/modules/test_ne.py | 4 ++-- python/oneflow/test/modules/test_negative.py | 4 ++-- python/oneflow/test/modules/test_reshape.py | 4 ++-- python/oneflow/test/modules/test_sign.py | 4 ++-- python/oneflow/test/modules/test_squeeze.py | 4 ++-- python/oneflow/test/modules/test_sub.py | 2 +- python/oneflow/test/modules/test_sum.py | 4 ++-- python/oneflow/test/modules/test_transpose.py | 4 ++-- python/oneflow/test/modules/test_triu.py | 4 ++-- python/oneflow/test/modules/test_unsqueeze.py | 2 +- python/oneflow/test/modules/test_var.py | 3 +-- python/oneflow/test/tensor/test_tensor.py | 4 ++-- .../torch_flow_dual_object.py | 3 ++- 28 files changed, 60 insertions(+), 60 deletions(-) diff --git a/oneflow/core/functional/impl/array_functor.cpp b/oneflow/core/functional/impl/array_functor.cpp index 5849b8b3cd8..fc7c47bdc3d 100644 --- a/oneflow/core/functional/impl/array_functor.cpp +++ b/oneflow/core/functional/impl/array_functor.cpp @@ -911,7 +911,7 @@ class ReshapeFunctor { } Maybe operator()(const std::shared_ptr& x, const Shape& shape) const { // if input tensor is eager local, than return tensor's 
view - if (x->is_eager() && x->is_local()) { return view::Reshape(x, shape); } + if (x->is_local() && !(LazyMode::is_enabled())) { return view::Reshape(x, shape); } int need_infer_axis = -1; size_t count = 1; for (int i = 0; i < shape.NumAxes(); ++i) { diff --git a/oneflow/core/job/plan_util.cpp b/oneflow/core/job/plan_util.cpp index 4a8e687d4e2..a404999eb36 100644 --- a/oneflow/core/job/plan_util.cpp +++ b/oneflow/core/job/plan_util.cpp @@ -163,7 +163,7 @@ void GenChunkForMultiNNGraphMemoryReuseInMultiClient( CHECK_LE(current_chunk_offset + mem_block->mem_size(), chunk->mem_size()); CHECK_GE(current_chunk_offset, 0); // CHECK_GT(mem_block->mem_size(), 0); NOTE(chengcheng): has mem block mem size = 0 - CHECK_GT(chunk->mem_size(), 0); + CHECK_GE(chunk->mem_size(), 0); mem_block->set_chunk_id(chunk->chunk_id()); mem_block->set_chunk_offset(current_chunk_offset); current_chunk_offset += mem_block->mem_size(); diff --git a/oneflow/core/operator/interface_op_util.cpp b/oneflow/core/operator/interface_op_util.cpp index 77b15626122..aec94ee596e 100644 --- a/oneflow/core/operator/interface_op_util.cpp +++ b/oneflow/core/operator/interface_op_util.cpp @@ -21,7 +21,7 @@ namespace oneflow { namespace { void CheckShape(const Shape& shape) { - FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GT(shape.At(i), 0); } + FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GE(shape.At(i), 0); } } Maybe GetSbpSignature(const InterfaceBlobConf& blob_conf, const PbRpf& input_bns, diff --git a/python/oneflow/test/modules/test_abs.py b/python/oneflow/test/modules/test_abs.py index c2fd5b2b990..ae4bfcb9f1d 100644 --- a/python/oneflow/test/modules/test_abs.py +++ b/python/oneflow/test/modules/test_abs.py @@ -24,7 +24,7 @@ @flow.unittest.skip_unless_1n1d() class TestAbsModule(flow.unittest.TestCase): @autotest(check_graph=True) - def test_abs_with_0shape_data(test_case): + def test_abs_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor().to(device) y = torch.abs(x) diff 
--git a/python/oneflow/test/modules/test_activation.py b/python/oneflow/test/modules/test_activation.py index f49ba2a9790..bae166eb9ae 100644 --- a/python/oneflow/test/modules/test_activation.py +++ b/python/oneflow/test/modules/test_activation.py @@ -39,8 +39,8 @@ def test_relu_module_with_random_data(test_case): y = m(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_relu_module_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_relu_module_with_0_size_data(test_case): m = torch.nn.ReLU() m.train(random()) device = random_device() @@ -62,8 +62,8 @@ def test_relu6_module_with_random_data(test_case): y = m(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_relu6_module_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_relu6_module_with_0_size_data(test_case): m = torch.nn.ReLU6() m.train(random()) device = random_device() @@ -85,8 +85,8 @@ def test_tanh_module_with_random_data(test_case): y = m(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_tanh_module_with_0shapedata(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_tanh_module_with_0_size_data(test_case): m = torch.nn.Tanh() m.train(random()) device = random_device() @@ -102,8 +102,8 @@ def test_flow_tanh_with_random_data(test_case): y = torch.tanh(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_flow_tanh_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_flow_tanh_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device) y = torch.tanh(x) @@ -122,8 +122,8 @@ def test_elu_module_with_random_data(test_case): y = m(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_elu_module_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def 
test_elu_module_with_0_size_data(test_case): m = torch.nn.ELU(alpha=random() | nothing()) m.train(random()) device = random_device() @@ -145,8 +145,8 @@ def test_celu_module_with_random_data(test_case): y = m(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_celu_module_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_celu_module_with_0_size_data(test_case): m = torch.nn.CELU(alpha=random() | nothing()) m.train(random()) device = random_device() diff --git a/python/oneflow/test/modules/test_add.py b/python/oneflow/test/modules/test_add.py index cf594f6874f..8b5b0ace7ad 100644 --- a/python/oneflow/test/modules/test_add.py +++ b/python/oneflow/test/modules/test_add.py @@ -170,8 +170,8 @@ def test_add(test_case): for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) - @autotest(check_graph=False) - def test_0shape_add(test_case): + @autotest(check_graph=True) + def test_0_size_add(test_case): device = random_device() x = random_pytorch_tensor(2, 0, 3).to(device) y = random_pytorch_tensor(2, 1, 3).to(device) diff --git a/python/oneflow/test/modules/test_cast.py b/python/oneflow/test/modules/test_cast.py index bf8782dc3c8..e2f2a17b12a 100644 --- a/python/oneflow/test/modules/test_cast.py +++ b/python/oneflow/test/modules/test_cast.py @@ -66,7 +66,7 @@ def test_cast(test_case): for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) - def test_cast_with_0shape_data(test_case): + def test_cast_with_0_size_data(test_case): arg_dict = OrderedDict() arg_dict["test_fun"] = [ _test_cast_float2int, diff --git a/python/oneflow/test/modules/test_ceil.py b/python/oneflow/test/modules/test_ceil.py index 749267c11b3..fb457b4dfb7 100644 --- a/python/oneflow/test/modules/test_ceil.py +++ b/python/oneflow/test/modules/test_ceil.py @@ -32,8 +32,8 @@ def test_ceil_flow_with_random_data(test_case): y = torch.ceil(input) return y - @autotest(auto_backward=False, check_graph=False) - def 
test_ceil_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_ceil_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = torch.ceil(x) diff --git a/python/oneflow/test/modules/test_clamp.py b/python/oneflow/test/modules/test_clamp.py index 36f9e24de35..1140a6732be 100644 --- a/python/oneflow/test/modules/test_clamp.py +++ b/python/oneflow/test/modules/test_clamp.py @@ -154,8 +154,8 @@ def test_clip_max_none_flow_with_random_data(test_case): ) return y - @autotest(auto_backward=False, check_graph=False) - def test_clamp_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_clamp_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = torch.clamp(x, min=random().to(float), max=random().to(float)) diff --git a/python/oneflow/test/modules/test_concat.py b/python/oneflow/test/modules/test_concat.py index e082ec8b33b..34ed9b24cc2 100644 --- a/python/oneflow/test/modules/test_concat.py +++ b/python/oneflow/test/modules/test_concat.py @@ -140,16 +140,16 @@ def test_cat_with_random_data(test_case): x = random_pytorch_tensor(ndim=2, dim0=random(), dim1=random()).to(device) return torch.cat((x, x, x), random(0, 2).to(int)) - @autotest(n=10, auto_backward=False, check_graph=False) - def test_concat_with_input_0shape_data(test_case): + @autotest(n=10, auto_backward=False, check_graph=True) + def test_concat_with_input_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 2, 4).to(device) y = random_pytorch_tensor(4, 2, 3, random(0, 3), 4).to(device) z = torch.cat((x, y), dim=2) return z - @autotest(n=10, auto_backward=False, check_graph=False) - def test_concat_with_output_0shape_data(test_case): + @autotest(n=10, auto_backward=False, check_graph=True) + def test_concat_with_output_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 0, 
2, 4).to(device) y = random_pytorch_tensor(4, 2, 0, 2, 4).to(device) diff --git a/python/oneflow/test/modules/test_div.py b/python/oneflow/test/modules/test_div.py index de6083aa200..7ab51192ad2 100644 --- a/python/oneflow/test/modules/test_div.py +++ b/python/oneflow/test/modules/test_div.py @@ -125,8 +125,8 @@ def test_div_against_pytorch(test_case): device=arg[1], ) - @autotest(auto_backward=False, check_graph=False) - def test_0shape_div(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_0_size_div(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) diff --git a/python/oneflow/test/modules/test_eq.py b/python/oneflow/test/modules/test_eq.py index 0c732a8d166..3caa5e014f8 100644 --- a/python/oneflow/test/modules/test_eq.py +++ b/python/oneflow/test/modules/test_eq.py @@ -28,8 +28,8 @@ @flow.unittest.skip_unless_1n1d() class TestEq(flow.unittest.TestCase): - @autotest(auto_backward=False, check_graph=False) - def test_eq_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_eq_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(3, 2, 0, 3).to(device) y = random_pytorch_tensor(3, 2, 0, 3).to(device) diff --git a/python/oneflow/test/modules/test_expm1.py b/python/oneflow/test/modules/test_expm1.py index 8d4ba7ac61f..58d32dbf624 100644 --- a/python/oneflow/test/modules/test_expm1.py +++ b/python/oneflow/test/modules/test_expm1.py @@ -58,8 +58,8 @@ def test_expm1_flow_with_random_data(test_case): y = torch.expm1(input) return y - @autotest(auto_backward=False, check_graph=False) - def test_expm1_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_expm1_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = torch.expm1(x) diff --git a/python/oneflow/test/modules/test_fmod.py 
b/python/oneflow/test/modules/test_fmod.py index 6c0686e3709..56533aa4ec9 100644 --- a/python/oneflow/test/modules/test_fmod.py +++ b/python/oneflow/test/modules/test_fmod.py @@ -55,8 +55,8 @@ def test_flow_fmod_scalar_with_random_data(test_case): other = 3 return torch.fmod(input, other) - @autotest(auto_backward=False, check_graph=False) - def test_fmod_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_fmod_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = torch.fmod(x, 2) diff --git a/python/oneflow/test/modules/test_greater.py b/python/oneflow/test/modules/test_greater.py index 7d9f81dc63d..e32f0973adf 100644 --- a/python/oneflow/test/modules/test_greater.py +++ b/python/oneflow/test/modules/test_greater.py @@ -119,8 +119,8 @@ def test_tensor_greater_with_random_data(test_case): y2 = x1 > x2 return (y1, y2) - @autotest(auto_backward=False, check_graph=False) - def test_greater_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_greater_with_0_size_data(test_case): device = random_device() x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device) x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device) diff --git a/python/oneflow/test/modules/test_ne.py b/python/oneflow/test/modules/test_ne.py index f7173ce767b..3da5caa8251 100644 --- a/python/oneflow/test/modules/test_ne.py +++ b/python/oneflow/test/modules/test_ne.py @@ -101,8 +101,8 @@ def test_ne(test_case): for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) - @autotest(auto_backward=False, check_graph=False) - def test_ne_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_ne_with_0_size_data(test_case): device = random_device() x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device) x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device) diff --git a/python/oneflow/test/modules/test_negative.py 
b/python/oneflow/test/modules/test_negative.py index 956efe3ee5e..ea5549c4d5c 100644 --- a/python/oneflow/test/modules/test_negative.py +++ b/python/oneflow/test/modules/test_negative.py @@ -24,8 +24,8 @@ @flow.unittest.skip_unless_1n1d() class TestNegativeModule(flow.unittest.TestCase): - @autotest(auto_backward=False, check_graph=False) - def test_ne_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_ne_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 0, 5).to(device) y1 = torch.negative(x) diff --git a/python/oneflow/test/modules/test_reshape.py b/python/oneflow/test/modules/test_reshape.py index f69839be09f..d9e129fb3d8 100644 --- a/python/oneflow/test/modules/test_reshape.py +++ b/python/oneflow/test/modules/test_reshape.py @@ -102,8 +102,8 @@ def test_reshape_flow_with_random_data(test_case): y = torch.reshape(x, shape=(-1,)) return y - @autotest(auto_backward=False, check_graph=False) - def test_reshape_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_reshape_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 0, 3).to(device) y = torch.reshape( diff --git a/python/oneflow/test/modules/test_sign.py b/python/oneflow/test/modules/test_sign.py index 6226958a115..78f7a9e7488 100644 --- a/python/oneflow/test/modules/test_sign.py +++ b/python/oneflow/test/modules/test_sign.py @@ -56,8 +56,8 @@ def test_sign_with_random_data(test_case): y = torch.sign(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_sign_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_sign_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device) y = torch.sign(x) diff --git a/python/oneflow/test/modules/test_squeeze.py b/python/oneflow/test/modules/test_squeeze.py index a85c20471eb..ca777688bb0 100644 --- 
a/python/oneflow/test/modules/test_squeeze.py +++ b/python/oneflow/test/modules/test_squeeze.py @@ -111,8 +111,8 @@ def test_flow_squeeze_with_random_data(test_case): y = torch.squeeze(x, random(1, 3).to(int)) return y - @autotest(auto_backward=False, check_graph=False) - def test_squeeze_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_squeeze_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(3, 2, 1, 0).to(device) y = torch.squeeze(x) diff --git a/python/oneflow/test/modules/test_sub.py b/python/oneflow/test/modules/test_sub.py index 79495994e36..973c09d73b5 100644 --- a/python/oneflow/test/modules/test_sub.py +++ b/python/oneflow/test/modules/test_sub.py @@ -128,7 +128,7 @@ def test_sub_against_pytorch(test_case): ) @autotest(auto_backward=False, check_graph=False) - def test_sub_with_0shape_data(test_case): + def test_sub_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(2, 0, 3).to(device) y = random_pytorch_tensor(2, 1, 3).to(device) diff --git a/python/oneflow/test/modules/test_sum.py b/python/oneflow/test/modules/test_sum.py index bc1afb1fa85..004856c03c1 100644 --- a/python/oneflow/test/modules/test_sum.py +++ b/python/oneflow/test/modules/test_sum.py @@ -77,8 +77,8 @@ def test_sum_against_pytorch(test_case): y = torch.sum(x) return y - @autotest(auto_backward=False, check_graph=False) - def test_sum_with_0shape_tensor(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_sum_with_0_size_tensor(test_case): device = random_device() x = random_pytorch_tensor(4, 4, 3, 0, 2).to(device) y = torch.sum(x, dim=np.random.randint(0, 3)) diff --git a/python/oneflow/test/modules/test_transpose.py b/python/oneflow/test/modules/test_transpose.py index 7d7ef4721a9..d495ee4b4e8 100644 --- a/python/oneflow/test/modules/test_transpose.py +++ b/python/oneflow/test/modules/test_transpose.py @@ -103,8 +103,8 @@ def 
test_transpose_flow_with_random_data(test_case): y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int)) return y - @autotest(auto_backward=False, check_graph=False) - def test_transpose_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_transpose_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device) y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int)) diff --git a/python/oneflow/test/modules/test_triu.py b/python/oneflow/test/modules/test_triu.py index 0c4e1533a23..de5a48851fc 100644 --- a/python/oneflow/test/modules/test_triu.py +++ b/python/oneflow/test/modules/test_triu.py @@ -52,8 +52,8 @@ def test_triu(test_case): for arg in GenArgList(arg_dict): arg[0](test_case, *arg[1:]) - @autotest(auto_backward=False, check_graph=False) - def test_triu_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_triu_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = torch.triu(x) diff --git a/python/oneflow/test/modules/test_unsqueeze.py b/python/oneflow/test/modules/test_unsqueeze.py index 87453bdee7c..7a5c9fa5c11 100644 --- a/python/oneflow/test/modules/test_unsqueeze.py +++ b/python/oneflow/test/modules/test_unsqueeze.py @@ -85,7 +85,7 @@ def test_flow_unsqueeze_with_random_data(test_case): return y @autotest(auto_backward=False, check_graph=True) - def test_unsqueeze_with_0shape_data(test_case): + def test_unsqueeze_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(3, 2, 1, 0).to(device) y = torch.unsqueeze(x, random(0, 2).to(int)) diff --git a/python/oneflow/test/modules/test_var.py b/python/oneflow/test/modules/test_var.py index 37a73f6c84a..b7c38776e7e 100644 --- a/python/oneflow/test/modules/test_var.py +++ b/python/oneflow/test/modules/test_var.py @@ -23,7 +23,6 @@ class TestVar(flow.unittest.TestCase): - 
@autotest(check_graph=False) def test_flow_var_all_dim_with_random_data(test_case): device = random_device() x = random_pytorch_tensor().to(device) @@ -44,7 +43,7 @@ def test_flow_var_one_dim_with_random_data(test_case): @unittest.skip("var not support 0-shape tensor currently") @autotest(check_graph=False) - def test_flow_var_0d_tensor_with_random_data(test_case): + def test_flow_var_0_size_data_with_random_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device) y = torch.var( diff --git a/python/oneflow/test/tensor/test_tensor.py b/python/oneflow/test/tensor/test_tensor.py index 9570c239dca..0e554d7e9ea 100644 --- a/python/oneflow/test/tensor/test_tensor.py +++ b/python/oneflow/test/tensor/test_tensor.py @@ -717,8 +717,8 @@ def test_flow_fmod_scalar_with_random_data(test_case): other = 3 return input.fmod(other) - @autotest(auto_backward=False, check_graph=False) - def test_fmod_with_0shape_data(test_case): + @autotest(auto_backward=False, check_graph=True) + def test_fmod_with_0_size_data(test_case): device = random_device() x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device) y = x.fmod(2) diff --git a/python/oneflow/test_utils/automated_test_util/torch_flow_dual_object.py b/python/oneflow/test_utils/automated_test_util/torch_flow_dual_object.py index b20396092a1..c5b870462d4 100644 --- a/python/oneflow/test_utils/automated_test_util/torch_flow_dual_object.py +++ b/python/oneflow/test_utils/automated_test_util/torch_flow_dual_object.py @@ -707,7 +707,8 @@ def new_f(test_case): rtol=rtol, atol=atol, equal_nan=True, - ) + ), + f"Check graph failed: graph result {eager_tensor_2_graph_tensor[flow_tensor].numpy()} not equals to eager result {flow_tensor.numpy()}.", ) if verbose: print(f"{f.__name__} test graph passed.")