代码拉取完成,页面将自动刷新
name | about | labels |
---|---|---|
Bug Report | Use this template for reporting a bug | kind/bug |
Hardware Environment (Ascend/GPU/CPU): Uncomment only one `/device <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:
/device gpu
def test_lstm_input_3_5_10_is_10_hs_12_nl_2_batch_first_true():
fact = LSTMFactory(input_np=(3, 5, 10), input_s=10, hidden_s=12, num_layers=2, has_bias=True, batch_first=True, dropout=0.0, bidirectional=False)
fact.forward_cmp()
> fact.grad_cmp()
test_lstm.py:19:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../share/ops/lstm_ops.py:258: in grad_cmp
out_mindspore = self.grad_mindspore_impl()
../share/ops/lstm_ops.py:154: in grad_mindspore_impl
input_grad = grad_net_inp(input, h, c, (out_grad_np1,(out_grad_np2,out_grad_np3)))
/root/miniconda3/envs/liwuxia_gpu/lib/python3.7/site-packages/mindspore/nn/cell.py:145: in __call__
out = self.compile_and_run(*inputs)
/root/miniconda3/envs/liwuxia_gpu/lib/python3.7/site-packages/mindspore/nn/cell.py:300: in compile_and_run
auto_parallel_mode=self._auto_parallel_mode)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <mindspore.common.api._Executor object at 0x7f57e40e7b90>, obj = GradOfAllInputsAndParams<
(network): WrapOp<
(lstm): LSTM<>
>
>
phase = '1train.1589871176618935808', params = None, do_convert = True, auto_parallel_mode = False
def compile(self, obj, *args, phase='predict', params=None, do_convert=True, auto_parallel_mode=False):
"""
Compiles graph.
Args:
obj (Function/Cell): The function or cell instance need compile.
args (tuple): Function or cell input arguments.
phase (str): The name of compile phase. Default: 'predict'.
params (OrderedDict): The parameters dictionary used for init data graph. Default: None.
do_convert (bool): When set to True, convert ME graph to GE graph after compiling graph.
auto_parallel_mode: When set to True, use auto parallel mode to compile graph.
Return:
Str, the full phase of the cell.
Bool, if the graph has been compiled before, return False, else return True.
"""
obj.check_names()
args_names, args_list = _generate_pip_args(obj, *args)
dic = dict(zip(args_names, args_list))
key = generate_key(phase, dic)
self.phase_prefix = str(key[1])
if phase == 'export':
phase = phase + '.' + str(obj.create_time)
else:
phase = self.phase_prefix + phase + '.' + str(obj.create_time)
enable_debug_runtime = context.get_context("enable_debug_runtime")
enable_ge = context.get_context("enable_ge")
use_vm = not enable_ge or (enable_debug_runtime and context.get_context("mode") == context.PYNATIVE_MODE)
if phase in self.compile_cache.keys():
logger.debug("%r graph has existed.", phase)
return phase, False
> result = self._executor.compile(obj, args_list, phase, use_vm)
E RuntimeError: mindspore/ccsrc/ir/dtype.cc:329 TypeIdToType] Not support the type: 14
Verify that the computed gradients of the LSTM are correct.
今天初步定位。
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。
重新执行用例,gpu session 加载input报错。input有一个是tuple[tensor, tuple[tensor, tensor]],可能不支持。需要找测试商议怎么处理。
转需求。vm后端图模式,支持嵌套tuple作为输入。
https://gitee.com/mindspore/dashboard/programs/74671/issues?issue_id=I1IXNI
最新GPU后端,LSTM算子用例,全部pass
登录 后才可以发表评论